Version 3.9.6
Fix template-related linker error. (issue 1936)
Allow inlining of functions containing object literals. (issue 1322)
Add --call-graph-size option to tickprocessor. (issue 1937)
Heap Snapshot maximum size limit is too low for really big apps. At the moment the limit is 256MB. (Chromium issue 113015)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@10696 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 482cca8..4cad4c8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2012-02-14: Version 3.9.6
+
+ Fix template-related linker error. (issue 1936)
+
+ Allow inlining of functions containing object literals. (issue 1322)
+
+ Add --call-graph-size option to tickprocessor. (issue 1937)
+
+ Heap Snapshot maximum size limit is too low for really big apps. At the
+ moment the limit is 256MB. (Chromium issue 113015)
+
+ Performance and stability improvements on all platforms.
+
+
2012-02-09: Version 3.9.5
Removed unused command line flags.
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 50b6bce..993addc 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -313,7 +313,7 @@
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element;
+ has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -418,6 +418,8 @@
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
+
+ __ bind(&finish);
__ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
@@ -430,8 +432,39 @@
__ Jump(lr);
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(
+ r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(r3, r4);
__ b(call_generic_code);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // r3: JSArray
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r2,
+ r9,
+ &cant_transition_map);
+ __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ RecordWriteField(r3,
+ HeapObject::kMapOffset,
+ r2,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ Label loop2;
+ __ sub(r7, r7, Operand(kPointerSize));
+ __ bind(&loop2);
+ __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+ __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+ __ cmp(r4, r5);
+ __ b(lt, &loop2);
+ __ b(&finish);
}
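The new labels give ArrayNativeCode a bail-out protocol: when the argument-copy loop meets a non-smi value, a heap number is punted to the generic code (it would need a double representation), while any other heap object transitions the array map from FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS once and the copy resumes at the finish label. A minimal C++ model of that control flow, with illustrative types that are not v8's:

    #include <vector>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };
    enum ValueKind { SMI, HEAP_NUMBER, OTHER_HEAP_OBJECT };

    // Returns false when the builtin must fall back to the generic code.
    bool StoreConstructorArgs(const std::vector<ValueKind>& args,
                              ElementsKind* kind) {
      *kind = FAST_SMI_ONLY_ELEMENTS;
      for (ValueKind v : args) {
        if (v == SMI) continue;              // stay on the smi-only fast path
        if (v == HEAP_NUMBER) return false;  // doubles go to the runtime
        *kind = FAST_ELEMENTS;               // one-way transition, then resume
      }
      return true;
    }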
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index c65f5bd..6b59602 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -3674,17 +3674,6 @@
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(r0);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, r0);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3865,13 +3854,27 @@
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ mov(r0, Operand(false, RelocInfo::NONE));
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ str(r0, MemOperand(r2));
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(r0);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(r0);
}
@@ -4912,10 +4915,10 @@
Label termination_exception;
__ b(eq, &termination_exception);
- __ Throw(r0); // Expects thrown value in r0.
+ __ Throw(r0);
__ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
+ __ ThrowUncatchable(r0);
__ bind(&failure);
// For failure and exception return null.
@@ -7059,11 +7062,13 @@
{ r2, r1, r3, EMIT_REMEMBERED_SET },
{ r3, r1, r2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { r4, r2, r3, EMIT_REMEMBERED_SET },
+ { r3, r2, r4, EMIT_REMEMBERED_SET },
+ { r2, r3, r4, EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ r2, r3, r9, EMIT_REMEMBERED_SET },
+ { r2, r3, r9, OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject
{ r6, r2, r0, EMIT_REMEMBERED_SET },
{ r2, r6, r9, EMIT_REMEMBERED_SET },
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index ce35b97..506f9b2 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -90,11 +90,16 @@
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
- __ push(lr);
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ b(eq, &only_change_map);
+
+ __ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedArray
// r5: number of elements (smi-tagged)
@@ -117,7 +122,7 @@
r9,
kLRHasBeenSaved,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(r3, r6, Operand(kHeapObjectTag));
@@ -146,6 +151,18 @@
__ b(&entry);
+ __ bind(&only_change_map);
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ b(&done);
+
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(lr);
@@ -194,6 +211,7 @@
if (!vfp3_supported) __ Pop(r1, r0);
__ pop(lr);
+ __ bind(&done);
}
@@ -207,10 +225,15 @@
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
- Label entry, loop, convert_hole, gc_required;
+ Label entry, loop, convert_hole, gc_required, only_change_map;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ b(eq, &only_change_map);
__ push(lr);
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ Push(r3, r2, r1, r0);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
@@ -280,16 +303,6 @@
__ b(lt, &loop);
__ Pop(r3, r2, r1, r0);
- // Update receiver's map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
@@ -301,6 +314,18 @@
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(lr);
+
+ __ bind(&only_change_map);
+ // Update receiver's map.
+ __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ RecordWriteField(r2,
+ HeapObject::kMapOffset,
+ r3,
+ r9,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
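Both ARM transition generators now begin with the same shortcut: if the receiver's backing store is the canonical empty FixedArray, only the map word changes and the conversion loop is skipped. Sketched below with hypothetical stand-in types (the real code also emits a write barrier for the map store):

    struct Map;
    struct FixedArrayBase {};
    static FixedArrayBase kEmptyFixedArray;  // stand-in for the heap root

    struct JSObject {
      Map* map;
      FixedArrayBase* elements;
    };

    void TransitionElements(JSObject* obj, Map* target_map) {
      if (obj->elements == &kEmptyFixedArray) {
        obj->map = target_map;  // nothing in the backing store to convert
        return;
      }
      // ... allocate the new backing store, convert every element, then
      // install the new map and elements pointer ...
      obj->map = target_map;
    }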
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 64ca1a3..3fce55e 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -4431,11 +4431,12 @@
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(r4, literals);
__ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(r2, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
@@ -4444,7 +4445,7 @@
__ mov(r1, Operand(Smi::FromInt(flags)));
__ Push(r4, r3, r2, r1);
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 2f0e5fa..a1a736e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1281,8 +1281,7 @@
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -1292,24 +1291,9 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r0.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(r0, Operand(false, RelocInfo::NONE));
- mov(r2, Operand(external_caught));
- str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- str(r0, MemOperand(r2));
- } else if (!value.is(r0)) {
+ if (!value.is(r0)) {
mov(r0, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
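This refactoring gives ThrowUncatchable a single job, normalizing the value register and unwinding to the top handler, by moving the OUT_OF_MEMORY bookkeeping into its one remaining caller, CEntryStub::Generate. Roughly, in simplified C++ with hypothetical types:

    struct Isolate {
      bool external_caught_exception;
      const void* pending_exception;
    };

    static const int kOutOfMemorySentinel = 0;
    static const void* const kOutOfMemory = &kOutOfMemorySentinel;

    // Unwinds to the top stack handler; no per-exception-type logic left.
    void ThrowUncatchable(Isolate* isolate, const void* value) {
      (void)isolate;
      (void)value;  // ... drop sp to the top handler and dispatch ...
    }

    // What CEntryStub::Generate now does before the uncatchable throw;
    // formerly this was the type == OUT_OF_MEMORY branch in the helper.
    void ThrowOutOfMemory(Isolate* isolate) {
      isolate->external_caught_exception = false;
      isolate->pending_exception = kOutOfMemory;
      ThrowUncatchable(isolate, kOutOfMemory);
    }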
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 45cca90..fad698e 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -588,12 +588,12 @@
// Must preserve the result register.
void PopTryHandler();
- // Passes thrown value (in r0) to the handler of top of the try handler chain.
+ // Passes thrown value to the handler on top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 2f2c5a8..9a0793e 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -3076,7 +3076,7 @@
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+ KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
__ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
@@ -4121,7 +4121,8 @@
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4130,13 +4131,16 @@
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
+ Register scratch = r4;
+ Register elements_reg = r3;
+ Register length_reg = r5;
+ Register scratch2 = r6;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4144,16 +4148,13 @@
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
- // Get the elements array and make sure it is a fast element array, not 'cow'.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ }
// Check that the key is within bounds.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
@@ -4161,10 +4162,21 @@
}
// Compare smis.
__ cmp(key_reg, scratch);
- __ b(hs, &miss_force_generic);
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ // Make sure elements is a fast element array, not 'cow'.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4202,12 +4214,80 @@
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+ TAG_OBJECT);
+
+ __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+ __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+ }
+
+ // Store the element at index zero.
+ __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ Ret();
+
+ __ bind(&check_capacity);
+ // Check for COW elements; in general they are not handled by this stub.
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedCOWArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ cmp(length_reg, scratch);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4217,7 +4297,8 @@
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label finish_store, check_capacity;
Register value_reg = r0;
Register key_reg = r1;
@@ -4227,6 +4308,7 @@
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
+ Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4245,8 +4327,13 @@
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
- __ b(hs, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ b(hs, &grow);
+ } else {
+ __ b(hs, &miss_force_generic);
+ }
+ __ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
@@ -4267,6 +4354,73 @@
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags already set by previous compare.
+ __ b(ne, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(value_reg, &value_is_smi);
+ __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ ldr(length_reg,
+ FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
+ __ b(ne, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+ TAG_OBJECT);
+
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
+ __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+ __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+ __ mov(scratch1,
+ Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ str(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+ // Install the new backing store in the JSArray.
+ __ str(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+ scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // Make sure that the backing store can hold additional elements.
+ __ ldr(scratch1,
+ FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ cmp(length_reg, scratch1);
+ __ b(hs, &slow);
+
+ // Grow the array and finish the store.
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
+ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
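The grow path added to both store stubs handles only the simplest growth: a store at key == length on a JSArray whose backing store has spare capacity (or is the canonical empty array, handled by preallocation). Growth by more than one element, COW backing stores, and exhausted capacity all leave the stub. A compact C++ model of that decision, with capacity standing in for the FixedArray length field:

    #include <cstddef>

    // Returns true when the store is handled inline; false models a jump
    // to the generic or slow IC.
    bool TryInlineKeyedStore(int* backing, size_t capacity, size_t* length,
                             size_t key, int value) {
      if (key < *length) {                 // in bounds: plain fast store
        backing[key] = value;
        return true;
      }
      if (key != *length) return false;    // not a one-element append: miss
      if (key >= capacity) return false;   // no spare room: slow IC instead
      backing[key] = value;
      *length += 1;                        // grow the array by one element
      return true;
    }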
diff --git a/src/ast.cc b/src/ast.cc
index 980dba6..7e886fa 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1009,6 +1009,7 @@
INCREASE_NODE_COUNT(ReturnStatement)
INCREASE_NODE_COUNT(Conditional)
INCREASE_NODE_COUNT(Literal)
+INCREASE_NODE_COUNT(ObjectLiteral)
INCREASE_NODE_COUNT(Assignment)
INCREASE_NODE_COUNT(Throw)
INCREASE_NODE_COUNT(Property)
@@ -1112,12 +1113,6 @@
}
-void AstConstructionVisitor::VisitObjectLiteral(ObjectLiteral* node) {
- increase_node_count();
- add_flag(kDontInline); // TODO(1322): Allow materialized literals.
-}
-
-
void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
increase_node_count();
add_flag(kDontInline); // TODO(1322): Allow materialized literals.
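This is the ast.cc half of "Allow inlining of functions containing object literals" (issue 1322): VisitObjectLiteral loses its kDontInline flag and collapses into the generic node-count macro, while array literals keep the flag. A simplified sketch of the flag mechanics:

    #include <bitset>

    enum AstPropertiesFlag { kDontInline, kDontSelfOptimize, kFlagCount };

    struct AstProperties {
      int node_count = 0;
      std::bitset<kFlagCount> flags;
    };

    // After the change: an object literal just counts as a node.
    void VisitObjectLiteral(AstProperties* props) {
      ++props->node_count;
    }

    // Array literals still block inlining (the TODO(1322) remains).
    void VisitArrayLiteral(AstProperties* props) {
      ++props->node_count;
      props->flags.set(kDontInline);
    }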
diff --git a/src/ast.h b/src/ast.h
index 7f81232..2031694 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -154,7 +154,7 @@
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+ virtual AstNode::Type node_type() const { return AstNode::k##type; }
enum AstPropertiesFlag {
@@ -245,11 +245,6 @@
};
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
-
-
class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
@@ -2050,6 +2045,8 @@
explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
};
+#undef DECLARE_NODE_TYPE
+
// ----------------------------------------------------------------------------
// Regular expressions
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 31a771f..ef53df4 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1743,7 +1743,9 @@
Handle<DescriptorArray> array_descriptors(
array_function->initial_map()->instance_descriptors());
int index = array_descriptors->SearchWithCache(heap()->length_symbol());
- reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
+ MaybeObject* copy_result =
+ reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
+ if (copy_result->IsFailure()) return false;
int enum_index = 0;
{
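The bootstrapper change fixes a missed failure check: CopyFrom returns a MaybeObject that can encode a retry-after-GC failure, and ignoring it would let bootstrapping continue with an unusable descriptor array. A minimal model of the idiom, with hypothetical types standing in for v8's Failure tagging:

    struct MaybeObject {
      bool failure;
      bool IsFailure() const { return failure; }
    };

    // The patch adds exactly this kind of check-and-bail.
    bool InstallDescriptors(MaybeObject* copy_result) {
      if (copy_result->IsFailure()) return false;  // propagate, retry later
      // ... continue bootstrapping with the copied descriptors ...
      return true;
    }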
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index dd1cc5e..11016c8 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -296,12 +296,14 @@
case FAST_SMI_ONLY_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
- elements_kind_);
+ elements_kind_,
+ grow_mode_);
}
break;
case FAST_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
- is_js_array_);
+ is_js_array_,
+ grow_mode_);
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -440,10 +442,13 @@
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
- FAST_ELEMENTS);
+ FAST_ELEMENTS,
+ grow_mode_);
} else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
- KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_);
+ KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
+ is_jsarray_,
+ grow_mode_);
} else {
UNREACHABLE();
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 78ff554..7b6ae5e 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -632,9 +632,6 @@
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope);
- void GenerateThrowTOS(MacroAssembler* masm);
- void GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type);
// Number of pointers/values returned.
const int result_size_;
@@ -985,20 +982,29 @@
class KeyedStoreElementStub : public CodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
- ElementsKind elements_kind)
- : is_js_array_(is_js_array),
- elements_kind_(elements_kind) { }
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode)
+ : is_js_array_(is_js_array),
+ elements_kind_(elements_kind),
+ grow_mode_(grow_mode) { }
Major MajorKey() { return KeyedStoreElement; }
int MinorKey() {
- return (is_js_array_ ? 0 : kElementsKindCount) + elements_kind_;
+ return ElementsKindBits::encode(elements_kind_) |
+ IsJSArrayBits::encode(is_js_array_) |
+ GrowModeBits::encode(grow_mode_);
}
void Generate(MacroAssembler* masm);
private:
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
+ class IsJSArrayBits: public BitField<bool, 9, 1> {};
+
bool is_js_array_;
ElementsKind elements_kind_;
+ KeyedAccessGrowMode grow_mode_;
DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
@@ -1076,24 +1082,28 @@
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsKind to,
bool is_jsarray,
- StrictModeFlag strict_mode)
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode)
: from_(from),
to_(to),
is_jsarray_(is_jsarray),
- strict_mode_(strict_mode) {}
+ strict_mode_(strict_mode),
+ grow_mode_(grow_mode) {}
private:
- class FromBits: public BitField<ElementsKind, 0, 8> {};
- class ToBits: public BitField<ElementsKind, 8, 8> {};
- class IsJSArrayBits: public BitField<bool, 16, 8> {};
- class StrictModeBits: public BitField<StrictModeFlag, 24, 8> {};
+ class FromBits: public BitField<ElementsKind, 0, 8> {};
+ class ToBits: public BitField<ElementsKind, 8, 8> {};
+ class IsJSArrayBits: public BitField<bool, 16, 1> {};
+ class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {};
+ class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {};
Major MajorKey() { return ElementsTransitionAndStore; }
int MinorKey() {
return FromBits::encode(from_) |
ToBits::encode(to_) |
IsJSArrayBits::encode(is_jsarray_) |
- StrictModeBits::encode(strict_mode_);
+ StrictModeBits::encode(strict_mode_) |
+ GrowModeBits::encode(grow_mode_);
}
void Generate(MacroAssembler* masm);
@@ -1102,6 +1112,7 @@
ElementsKind to_;
bool is_jsarray_;
StrictModeFlag strict_mode_;
+ KeyedAccessGrowMode grow_mode_;
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
};
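Both stubs switch their MinorKey from ad-hoc arithmetic to BitField encodings. That keeps the code-cache key collision-free as parameters are added (grow_mode_ here) and right-sizes over-wide fields: is_jsarray_ needs 1 bit, not 8. A self-contained sketch of the BitField template this relies on, simplified from src/utils.h:

    #include <cstdint>

    template <class T, int kShift, int kSize>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value >> kShift) & ((1u << kSize) - 1));
      }
    };

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };
    enum KeyedAccessGrowMode { DO_NOT_ALLOW_JSARRAY_GROWTH,
                               ALLOW_JSARRAY_GROWTH };

    typedef BitField<ElementsKind, 0, 8> ElementsKindBits;
    typedef BitField<KeyedAccessGrowMode, 8, 1> GrowModeBits;
    typedef BitField<bool, 9, 1> IsJSArrayBits;

    uint32_t MinorKey(ElementsKind kind, bool is_js_array,
                      KeyedAccessGrowMode grow_mode) {
      return ElementsKindBits::encode(kind) |
             IsJSArrayBits::encode(is_js_array) |
             GrowModeBits::encode(grow_mode);
    }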
diff --git a/src/factory.cc b/src/factory.cc
index 5915f48..daf9d64 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -865,7 +865,7 @@
// Copy the descriptors from the array.
for (int i = 0; i < array->number_of_descriptors(); i++) {
if (!array->IsNullDescriptor(i)) {
- result->CopyFrom(descriptor_count++, *array, i, witness);
+ DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
}
}
@@ -899,7 +899,7 @@
Handle<DescriptorArray> new_result =
NewDescriptorArray(number_of_descriptors);
for (int i = 0; i < number_of_descriptors; i++) {
- new_result->CopyFrom(i, *result, i, witness);
+ DescriptorArray::CopyFrom(new_result, i, result, i, witness);
}
result = new_result;
}
diff --git a/src/heap.cc b/src/heap.cc
index b082886..ff31361 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -4562,7 +4562,7 @@
MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
int length,
PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_double_array();
+ if (length == 0) return empty_fixed_array();
Object* elements_object;
MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
@@ -4579,7 +4579,7 @@
MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
int length,
PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_double_array();
+ if (length == 0) return empty_fixed_array();
Object* elements_object;
MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
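Removing empty_fixed_double_array means every zero-length backing store is now the one canonical empty FixedArray, regardless of elements kind. Giving "empty" a single identity is what lets the new only_change_map fast paths in codegen test for it with a pointer compare. Illustrated with hypothetical stand-ins:

    #include <cassert>

    struct FixedArrayBase { int length; };
    static FixedArrayBase kEmptyFixedArray = { 0 };  // one shared root

    FixedArrayBase* AllocateFixedDoubleArray(int length) {
      if (length == 0) return &kEmptyFixedArray;  // shared with all kinds
      return new FixedArrayBase{ length };
    }

    int main() {
      // Empty arrays are pointer-identical, whatever their nominal kind.
      assert(AllocateFixedDoubleArray(0) == AllocateFixedDoubleArray(0));
      return 0;
    }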
diff --git a/src/heap.h b/src/heap.h
index 83e9b61..32bf7a8 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -74,7 +74,6 @@
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
@@ -205,12 +204,10 @@
V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
V(KeyedLoadElementMonomorphic_symbol, \
"KeyedLoadElementMonomorphic") \
- V(KeyedLoadElementPolymorphic_symbol, \
- "KeyedLoadElementPolymorphic") \
V(KeyedStoreElementMonomorphic_symbol, \
"KeyedStoreElementMonomorphic") \
- V(KeyedStoreElementPolymorphic_symbol, \
- "KeyedStoreElementPolymorphic") \
+ V(KeyedStoreAndGrowElementMonomorphic_symbol, \
+ "KeyedStoreAndGrowElementMonomorphic") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index efa3456..f303db4 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1088,7 +1088,7 @@
bool construct_call,
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array;
+ empty_array, not_empty_array, finish, cant_transition_map, not_double;
// Push the constructor and argc. No need to tag argc as a smi, as there will
// be no garbage collection with this on the stack.
@@ -1247,6 +1247,7 @@
// esp[8]: constructor (only if construct_call)
// esp[12]: return address
// esp[16]: last argument
+ __ bind(&finish);
__ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
__ pop(eax);
__ pop(ebx);
@@ -1255,9 +1256,43 @@
__ jmp(ecx);
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &not_double,
+ DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
// Throw away the array that's only been partially constructed.
__ pop(eax);
__ UndoAllocationInNewSpace(eax);
+ __ jmp(&prepare_generic_code_call);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ __ mov(ebx, Operand(esp, 0));
+ __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(
+ FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ edi,
+ eax,
+ &cant_transition_map);
+ __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
+ __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Prepare to re-enter the loop.
+ __ lea(edi, Operand(esp, last_arg_offset));
+
+ // Finish the array initialization loop.
+ Label loop2;
+ __ bind(&loop2);
+ __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ __ mov(Operand(edx, 0), eax);
+ __ add(edx, Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(greater_equal, &loop2);
+ __ jmp(&finish);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index b3a0b95..ed37910 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -3922,7 +3922,7 @@
__ Throw(eax);
__ bind(&throw_termination_exception);
- __ ThrowUncatchable(TERMINATION, eax);
+ __ ThrowUncatchable(eax);
__ bind(&failure);
// For failure to match, return null.
@@ -4780,11 +4780,6 @@
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(eax);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -4903,12 +4898,6 @@
}
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, eax);
-}
-
-
void CEntryStub::Generate(MacroAssembler* masm) {
// eax: number of arguments including receiver
// ebx: pointer to C function (C callee-saved)
@@ -4962,13 +4951,24 @@
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ mov(Operand::StaticVariable(external_caught), Immediate(false));
+
+ // Set pending exception and eax to out of memory exception.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(eax);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(eax);
}
@@ -7041,11 +7041,13 @@
// KeyedStoreIC::GenerateGeneric.
{ ebx, edx, ecx, EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { edi, edx, ecx, EMIT_REMEMBERED_SET},
+ { edi, ebx, ecx, EMIT_REMEMBERED_SET},
+ { edx, edi, ebx, EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ edx, ebx, edi, EMIT_REMEMBERED_SET},
+ { edx, ebx, edi, OMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
{ eax, edx, esi, EMIT_REMEMBERED_SET},
{ edx, eax, edi, EMIT_REMEMBERED_SET},
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index e5ca02c..3e085a2 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -301,11 +301,17 @@
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
__ push(eax);
__ push(ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
@@ -399,6 +405,11 @@
__ pop(ebx);
__ pop(eax);
+
+ // Restore esi.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&only_change_map);
// eax: value
// ebx: target map
// Set transitioned map.
@@ -408,10 +419,8 @@
ebx,
edi,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- // Restore esi.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@@ -424,12 +433,18 @@
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map, success;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(equal, &only_change_map);
+
__ push(eax);
__ push(edx);
__ push(ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
// Allocate new FixedArray.
@@ -446,6 +461,20 @@
__ jmp(&entry);
+ // ebx: target map
+ // edx: receiver
+ // Set transitioned map.
+ __ bind(&only_change_map);
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+ __ RecordWriteField(edx,
+ HeapObject::kMapOffset,
+ ebx,
+ edi,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&success);
+
// Call into runtime if GC is required.
__ bind(&gc_required);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -507,7 +536,7 @@
ebx,
edi,
kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
+ OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
@@ -522,6 +551,8 @@
// Restore registers.
__ pop(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&success);
}
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index dbb0554..33f247a 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1639,6 +1639,9 @@
__ pop(ebx);
__ push(edx);
__ push(ebx); // return address
+ // Leave the code managed by the register allocator and return to the
+ // convention of using esi as the context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
}
@@ -1662,6 +1665,9 @@
__ pop(ebx);
__ push(edx);
__ push(ebx); // return address
+ // Leave the code managed by the register allocator and return to the
+ // convention of using esi as the context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
}
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 5a276f4..557e6db 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -4312,12 +4312,12 @@
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));
int flags = instr->hydrogen()->fast_elements()
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 9986c3e..e63a065 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -862,8 +862,7 @@
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -873,21 +872,9 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in eax.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(Operand::StaticVariable(external_caught), Immediate(false));
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
- mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- mov(Operand::StaticVariable(pending_exception), eax);
- } else if (!value.is(eax)) {
+ if (!value.is(eax)) {
mov(eax, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
mov(esp, Operand::StaticVariable(handler_address));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index b06d801..ae9bb5a 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -496,10 +496,11 @@
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
- // Activate the top handler in the try hander chain.
+ // Throw to the top handler in the try handler chain.
void Throw(Register value);
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ // Throw past all JS frames to the top JS entry frame.
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 9717869..1e91029 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -2591,7 +2591,7 @@
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_jsarray, elements_kind).GetCode();
+ KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();
__ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
@@ -3718,14 +3718,16 @@
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, grow, slow, transition_elements_kind;
+ Label check_capacity, prepare_slow, finish_store, commit_backing_store;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3733,24 +3735,32 @@
// Check that the key is a smi.
__ JumpIfNotSmi(ecx, &miss_force_generic);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(eax, &transition_elements_kind);
+ }
+
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
if (is_js_array) {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
__ j(above_equal, &miss_force_generic);
}
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ j(not_equal, &miss_force_generic);
+
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
@@ -3768,8 +3778,8 @@
FixedArray::kHeaderSize));
__ mov(Operand(ecx, 0), eax);
// Make sure to preserve the value in register eax.
- __ mov(edx, eax);
- __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
+ __ mov(ebx, eax);
+ __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
}
// Done.
@@ -3785,19 +3795,94 @@
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Handle transition requiring the array to grow.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags are already set by previous
+ // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Initialize the new FixedArray backing store.
+ __ mov(FieldOperand(edi, JSObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
+ }
+
+ // Store the element at index zero.
+ __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
+
+ // Install the new backing store in the JSArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ ret(0);
+
+ __ bind(&check_capacity);
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
+ __ j(equal, &miss_force_generic);
+
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&finish_store);
+
+ __ bind(&prepare_slow);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label check_capacity, prepare_slow, finish_store, commit_backing_store;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3812,19 +3897,19 @@
if (is_js_array) {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
// Check that the key is within bounds.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
}
- __ j(above_equal, &miss_force_generic);
- __ StoreNumberToDoubleElements(eax,
- edi,
- ecx,
- edx,
- xmm0,
- &transition_elements_kind,
- true);
+ __ bind(&finish_store);
+ __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
+ &transition_elements_kind, true);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -3837,6 +3922,78 @@
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Handle transition requiring the array to grow.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags are already set by previous
+ // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(eax, &value_is_smi);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
+ __ j(not_equal, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
+ __ mov(FieldOperand(edi, JSObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+ __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
+ Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+
+ // Install the new backing store in the JSArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&finish_store);
+
+ __ bind(&prepare_slow);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
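When the grow path starts from the canonical empty backing store, the stub preallocates JSArray::kPreallocatedArrayElements slots rather than one, filling the tail with holes so the next few appends stay on the fast path. A sketch, assuming the constant is 4 as in v8 of this era and using a stand-in hole value:

    #include <vector>

    const int kPreallocatedArrayElements = 4;  // assumed to match v8
    const int kTheHole = -1;                   // stand-in for the hole value

    std::vector<int> MakeInitialBackingStore(int first_value) {
      std::vector<int> store(kPreallocatedArrayElements, kTheHole);
      store[0] = first_value;  // store the element at index zero
      return store;            // length is 1, capacity is 4
    }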
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 4daf944..7467881 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -85,7 +85,8 @@
Code* old_target = GetTargetAtAddress(address);
if (old_target->kind() == Code::STORE_IC ||
old_target->kind() == Code::KEYED_STORE_IC) {
- ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
+ ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
+ Code::GetStrictMode(target->extra_ic_state()));
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
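extra_ic_state used to carry only the strict-mode flag, so the old assert could compare raw words. Now that the grow mode shares the word, two otherwise-compatible store ICs may differ in the grow bit, hence the decoded comparison. A sketch of the packing this assumes (the real layout lives in Code::ComputeExtraICState):

    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
    enum KeyedAccessGrowMode {
      DO_NOT_ALLOW_JSARRAY_GROWTH = 0,
      ALLOW_JSARRAY_GROWTH = 1
    };

    typedef int ExtraICState;

    ExtraICState ComputeExtraICState(KeyedAccessGrowMode grow_mode,
                                     StrictModeFlag strict_mode) {
      return (grow_mode << 1) | strict_mode;  // assumed bit layout
    }

    StrictModeFlag GetStrictMode(ExtraICState state) {
      return static_cast<StrictModeFlag>(state & 1);
    }

    KeyedAccessGrowMode GetKeyedAccessGrowMode(ExtraICState state) {
      return static_cast<KeyedAccessGrowMode>((state >> 1) & 1);
    }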
diff --git a/src/ic.cc b/src/ic.cc
index 9846984..f774b92 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -81,9 +81,13 @@
}
}
JavaScriptFrame::PrintTop(stdout, false, true);
- PrintF(" (%c->%c)",
+ bool new_can_grow =
+ Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
+ ALLOW_JSARRAY_GROWTH;
+ PrintF(" (%c->%c%s)",
TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state));
+ TransitionMarkFromState(new_state),
+ new_can_grow ? ".GROW" : "");
name->Print();
PrintF("]\n");
}
@@ -375,7 +379,7 @@
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
- (target->extra_ic_state() == kStrictMode)
+ (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
? initialize_stub_strict()
: initialize_stub());
}
@@ -384,7 +388,7 @@
void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
- (target->extra_ic_state() == kStrictMode)
+ (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
? initialize_stub_strict()
: initialize_stub());
}
@@ -996,19 +1000,22 @@
Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
+ ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
return KeyedLoadElementStub(elements_kind).GetCode();
}
Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
MapHandleList* receiver_maps,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode growth_mode) {
CodeHandleList handler_ics(receiver_maps->length());
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map = receiver_maps->at(i);
Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode);
+ receiver_map, strict_mode, growth_mode);
handler_ics.Add(cached_stub);
}
KeyedLoadStubCompiler compiler(isolate());
@@ -1493,12 +1500,9 @@
StrictModeFlag strict_mode,
Handle<Code> generic_stub) {
State ic_state = target()->ic_state();
- if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) &&
- !IsTransitionStubKind(stub_kind)) {
- return ComputeMonomorphicStub(
- receiver, stub_kind, strict_mode, generic_stub);
- }
- ASSERT(target() != *generic_stub);
+ KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
+ ? ALLOW_JSARRAY_GROWTH
+ : DO_NOT_ALLOW_JSARRAY_GROWTH;
// Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
// via megamorphic stubs, since they don't have a map in their relocation info
@@ -1508,15 +1512,39 @@
return generic_stub;
}
- // Determine the list of receiver maps that this call site has seen,
- // adding the map that was just encountered.
+ bool monomorphic = false;
MapHandleList target_receiver_maps;
- Handle<Map> receiver_map(receiver->map());
- if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
- target_receiver_maps.Add(receiver_map);
- } else {
+ if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) {
GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
}
+ if (!IsTransitionStubKind(stub_kind)) {
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ monomorphic = true;
+ } else {
+ if (ic_state == MONOMORPHIC) {
+ // The first time a receiver is seen that is a transitioned version of
+ // the previous monomorphic receiver type, assume the new ElementsKind
+ // is the monomorphic type. This benefits global arrays that only
+ // transition once, and all call sites accessing them are faster if they
+ // remain monomorphic. If this optimistic assumption is not true, the IC
+ // will miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ monomorphic = IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind());
+ }
+ }
+ }
+
+ if (monomorphic) {
+ return ComputeMonomorphicStub(
+ receiver, stub_kind, strict_mode, generic_stub);
+ }
+ ASSERT(target() != *generic_stub);
+
+ // Determine the list of receiver maps that this call site has seen,
+ // adding the map that was just encountered.
+ Handle<Map> receiver_map(receiver->map());
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
if (IsTransitionStubKind(stub_kind)) {
@@ -1537,14 +1565,21 @@
return generic_stub;
}
+ if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
+ ALLOW_JSARRAY_GROWTH)) {
+ grow_mode = ALLOW_JSARRAY_GROWTH;
+ }
+
Handle<PolymorphicCodeCache> cache =
isolate()->factory()->polymorphic_code_cache();
- Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, strict_mode);
+ Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
+ strict_mode);
+ Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state);
Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
if (probe->IsCode()) return Handle<Code>::cast(probe);
Handle<Code> stub =
- ComputePolymorphicStub(&target_receiver_maps, strict_mode);
+ ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode);
PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
return stub;
}
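
The rewritten ComputeStub above encodes two policies worth spelling out. First, a call site stays monomorphic not only when it is uninitialized but also when its single receiver map has merely transitioned to a more general elements kind (the IsMoreGeneralElementsKindTransition check), so global arrays that transition once keep their fast monomorphic stubs. Second, the grow mode of a site only widens: it starts from IsGrowStubKind and, further down, is forced to ALLOW_JSARRAY_GROWTH if the current target stub already allowed growth. A standalone sketch of both rules; the enum here is ordered by generality purely for the demo (V8 checks specific kind pairs), and StaysMonomorphic/MergeGrowMode are invented names:

    #include <algorithm>
    #include <cassert>

    // Stand-ins for V8's types; ordered by generality only for this demo.
    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
    enum KeyedAccessGrowMode { DO_NOT_ALLOW_JSARRAY_GROWTH, ALLOW_JSARRAY_GROWTH };

    // Rule 1: a monomorphic site absorbs a transitioned receiver when the
    // incoming kind is strictly more general than the one it has seen.
    bool StaysMonomorphic(ElementsKind seen, ElementsKind incoming) {
      return seen < incoming;
    }

    // Rule 2: the grow mode of a call site only ever widens across recompiles.
    KeyedAccessGrowMode MergeGrowMode(KeyedAccessGrowMode old_mode,
                                      KeyedAccessGrowMode new_mode) {
      return std::max(old_mode, new_mode);
    }

    int main() {
      assert(StaysMonomorphic(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
      assert(!StaysMonomorphic(FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS));
      assert(MergeGrowMode(ALLOW_JSARRAY_GROWTH, DO_NOT_ALLOW_JSARRAY_GROWTH) ==
             ALLOW_JSARRAY_GROWTH);
      return 0;
    }
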
@@ -1552,7 +1587,8 @@
Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
Handle<Map> receiver_map,
- StrictModeFlag strict_mode) {
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode) {
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
ASSERT(!string_stub().is_null());
return string_stub();
@@ -1564,7 +1600,8 @@
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
return GetElementStubWithoutMapCheck(is_js_array,
- receiver_map->elements_kind());
+ receiver_map->elements_kind(),
+ grow_mode);
}
}
@@ -1591,9 +1628,12 @@
switch (stub_kind) {
case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
+ case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+ case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
break;
case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
+ case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
break;
default:
@@ -1605,13 +1645,16 @@
Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
bool is_js_array,
- ElementsKind elements_kind) {
- return KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
+ return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode();
}
-Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode) {
+Handle<Code> KeyedStoreIC::ComputePolymorphicStub(
+ MapHandleList* receiver_maps,
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode) {
// Collect MONOMORPHIC stubs for all target_receiver_maps.
CodeHandleList handler_ics(receiver_maps->length());
MapHandleList transitioned_maps(receiver_maps->length());
@@ -1625,16 +1668,17 @@
receiver_map->elements_kind(), // original elements_kind
transitioned_map->elements_kind(),
receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array
- strict_mode).GetCode();
+ strict_mode, grow_mode).GetCode();
} else {
cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
- strict_mode);
+ strict_mode,
+ grow_mode);
}
ASSERT(!cached_stub.is_null());
handler_ics.Add(cached_stub);
transitioned_maps.Add(transitioned_map);
}
- KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
Handle<Code> code = compiler.CompileStorePolymorphic(
receiver_maps, &handler_ics, &transitioned_maps);
isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
@@ -1644,6 +1688,48 @@
}
+KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
+ Handle<Object> key,
+ Handle<Object> value) {
+ ASSERT(key->IsSmi());
+ int index = Smi::cast(*key)->value();
+ bool allow_growth = receiver->IsJSArray() &&
+ JSArray::cast(*receiver)->length()->IsSmi() &&
+ index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+
+ if (allow_growth) {
+ // Handle growing array in stub if necessary.
+ if (receiver->HasFastSmiOnlyElements()) {
+ if (value->IsHeapNumber()) {
+ return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
+ }
+ if (value->IsHeapObject()) {
+ return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
+ }
+ } else if (receiver->HasFastDoubleElements()) {
+ if (!value->IsSmi() && !value->IsHeapNumber()) {
+ return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
+ }
+ }
+ return STORE_AND_GROW_NO_TRANSITION;
+ } else {
+ // Handle only in-bounds elements accesses.
+ if (receiver->HasFastSmiOnlyElements()) {
+ if (value->IsHeapNumber()) {
+ return STORE_TRANSITION_SMI_TO_DOUBLE;
+ } else if (value->IsHeapObject()) {
+ return STORE_TRANSITION_SMI_TO_OBJECT;
+ }
+ } else if (receiver->HasFastDoubleElements()) {
+ if (!value->IsSmi() && !value->IsHeapNumber()) {
+ return STORE_TRANSITION_DOUBLE_TO_OBJECT;
+ }
+ }
+ return STORE_NO_TRANSITION;
+ }
+}
+
+
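+ The new GetStubKind decides, in order: does this smi-keyed store land at or past a JSArray's current length (then the stub must grow the array), and does the stored value force an elements-kind transition (a heap number on a smi-only array goes to doubles, any other heap object goes to FAST_ELEMENTS, a non-number on a double array goes to FAST_ELEMENTS). A compilable sketch of the same decision table; StoreSite is a made-up flattened view of the receiver/value queries, and the smi-length check on the array is elided:

    #include <cassert>

    // Hypothetical flattened view of the inputs GetStubKind inspects.
    struct StoreSite {
      bool is_js_array;
      int length;  // current JSArray length
      int index;   // smi store key
      bool has_smi_only_elements;
      bool has_double_elements;
      bool value_is_smi;
      bool value_is_heap_number;
    };

    enum StubKind {
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_TRANSITION_SMI_TO_DOUBLE,
      STORE_TRANSITION_DOUBLE_TO_OBJECT,
      STORE_AND_GROW_NO_TRANSITION,
      STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
      STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
      STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
    };

    StubKind GetStubKind(const StoreSite& s) {
      // Writing at or past the length of a JSArray means the stub must grow it.
      bool allow_growth = s.is_js_array && s.index >= s.length;
      if (s.has_smi_only_elements && !s.value_is_smi) {
        // Heap numbers still fit a double array; other heap objects force
        // a transition all the way to FAST_ELEMENTS.
        if (s.value_is_heap_number) {
          return allow_growth ? STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE
                              : STORE_TRANSITION_SMI_TO_DOUBLE;
        }
        return allow_growth ? STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT
                            : STORE_TRANSITION_SMI_TO_OBJECT;
      }
      if (s.has_double_elements && !s.value_is_smi && !s.value_is_heap_number) {
        return allow_growth ? STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
                            : STORE_TRANSITION_DOUBLE_TO_OBJECT;
      }
      return allow_growth ? STORE_AND_GROW_NO_TRANSITION : STORE_NO_TRANSITION;
    }

    int main() {
      // Store of a plain heap object one past the end of a smi-only array.
      StoreSite s = {true, 3, 3, true, false, false, false};
      assert(GetStubKind(s) == STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT);
      return 0;
    }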
MaybeObject* KeyedStoreIC::Store(State state,
StrictModeFlag strict_mode,
Handle<Object> object,
@@ -1706,18 +1792,7 @@
stub = non_strict_arguments_stub();
} else if (!force_generic) {
if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
- StubKind stub_kind = STORE_NO_TRANSITION;
- if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
- if (value->IsHeapNumber()) {
- stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE;
- } else if (value->IsHeapObject()) {
- stub_kind = STORE_TRANSITION_SMI_TO_OBJECT;
- }
- } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
- }
+ StubKind stub_kind = GetStubKind(receiver, key, value);
stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
}
} else {
@@ -1900,7 +1975,7 @@
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
- static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+ Code::GetStrictMode(extra_ic_state),
args.at<Object>(0),
args.at<String>(1),
args.at<Object>(2));
@@ -1976,7 +2051,7 @@
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
- static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+ Code::GetStrictMode(extra_ic_state),
args.at<Object>(0),
args.at<Object>(1),
args.at<Object>(2),
@@ -1992,8 +2067,7 @@
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
+ StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
return Runtime::SetObjectProperty(isolate,
object,
key,
@@ -2010,7 +2084,7 @@
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
- static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+ Code::GetStrictMode(extra_ic_state),
args.at<Object>(0),
args.at<Object>(1),
args.at<Object>(2),
diff --git a/src/ic.h b/src/ic.h
index d2c98c0..5711a48 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -377,14 +377,48 @@
STORE_NO_TRANSITION,
STORE_TRANSITION_SMI_TO_OBJECT,
STORE_TRANSITION_SMI_TO_DOUBLE,
- STORE_TRANSITION_DOUBLE_TO_OBJECT
+ STORE_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_NO_TRANSITION,
+ STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
};
+
+ static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
+ STORE_NO_TRANSITION;
+ STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
+ STORE_TRANSITION_SMI_TO_OBJECT);
+ STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
+ STORE_TRANSITION_SMI_TO_DOUBLE);
+ STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
+ STORE_TRANSITION_DOUBLE_TO_OBJECT);
+
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
+ static inline KeyedAccessGrowMode GetGrowModeFromStubKind(
+ StubKind stub_kind) {
+ return (stub_kind >= STORE_AND_GROW_NO_TRANSITION)
+ ? ALLOW_JSARRAY_GROWTH
+ : DO_NOT_ALLOW_JSARRAY_GROWTH;
+ }
+
+ static inline StubKind GetGrowStubKind(StubKind stub_kind) {
+ ASSERT(stub_kind != LOAD);
+ if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
+ stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
+ kGrowICDelta);
+ }
+ return stub_kind;
+ }
+
virtual Handle<Code> GetElementStubWithoutMapCheck(
bool is_js_array,
- ElementsKind elements_kind) = 0;
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) = 0;
protected:
virtual Handle<Code> string_stub() {
@@ -398,12 +432,15 @@
StrictModeFlag strict_mode,
Handle<Code> default_stub);
- virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode) = 0;
+ virtual Handle<Code> ComputePolymorphicStub(
+ MapHandleList* receiver_maps,
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode) = 0;
Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
Handle<Map> receiver_map,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode);
private:
void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
@@ -417,7 +454,12 @@
StubKind stub_kind);
static bool IsTransitionStubKind(StubKind stub_kind) {
- return stub_kind > STORE_NO_TRANSITION;
+ return stub_kind > STORE_NO_TRANSITION &&
+ stub_kind != STORE_AND_GROW_NO_TRANSITION;
+ }
+
+ static bool IsGrowStubKind(StubKind stub_kind) {
+ return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
}
};
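
The enum layout is load-bearing here: every STORE_AND_GROW_* kind sits exactly kGrowICDelta entries after its non-growing twin, which is what the STATIC_ASSERTs pin down and what GetGrowStubKind exploits, and the two predicates fall out of simple ordering comparisons. A compilable model (LOAD is assumed to precede the store kinds, as the ASSERT in GetGrowStubKind implies):

    #include <cassert>

    enum StubKind {
      LOAD,
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_TRANSITION_SMI_TO_DOUBLE,
      STORE_TRANSITION_DOUBLE_TO_OBJECT,
      STORE_AND_GROW_NO_TRANSITION,
      STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
      STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
      STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
    };

    const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - STORE_NO_TRANSITION;

    // Adding the fixed delta turns any non-growing store kind into its
    // growing twin; growing kinds map to themselves.
    StubKind GetGrowStubKind(StubKind kind) {
      assert(kind != LOAD);
      if (kind < STORE_AND_GROW_NO_TRANSITION) {
        kind = static_cast<StubKind>(static_cast<int>(kind) + kGrowICDelta);
      }
      return kind;
    }

    bool IsGrowStubKind(StubKind kind) {
      return kind >= STORE_AND_GROW_NO_TRANSITION;
    }

    bool IsTransitionStubKind(StubKind kind) {
      return kind > STORE_NO_TRANSITION && kind != STORE_AND_GROW_NO_TRANSITION;
    }

    int main() {
      assert(GetGrowStubKind(STORE_TRANSITION_SMI_TO_DOUBLE) ==
             STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE);
      assert(IsGrowStubKind(STORE_AND_GROW_NO_TRANSITION));
      assert(!IsTransitionStubKind(STORE_AND_GROW_NO_TRANSITION));
      assert(IsTransitionStubKind(STORE_TRANSITION_SMI_TO_OBJECT));
      return 0;
    }
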
@@ -456,7 +498,8 @@
virtual Handle<Code> GetElementStubWithoutMapCheck(
bool is_js_array,
- ElementsKind elements_kind);
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode);
virtual bool IsGeneric() const {
return target() == *generic_stub();
@@ -466,7 +509,8 @@
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode);
virtual Handle<Code> string_stub() {
return isolate()->builtins()->KeyedLoadIC_String();
@@ -540,8 +584,8 @@
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT((code->extra_ic_state() & kStrictMode) ==
- (target()->extra_ic_state() & kStrictMode));
+ ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
+ Code::GetStrictMode(target()->extra_ic_state()));
IC::set_target(code);
}
@@ -603,7 +647,8 @@
virtual Handle<Code> GetElementStubWithoutMapCheck(
bool is_js_array,
- ElementsKind elements_kind);
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode);
virtual bool IsGeneric() const {
return target() == *generic_stub() ||
@@ -614,7 +659,8 @@
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode);
private:
// Update the inline cache.
@@ -627,8 +673,8 @@
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
- ASSERT((code->extra_ic_state() & kStrictMode) ==
- (target()->extra_ic_state() & kStrictMode));
+ ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
+ Code::GetStrictMode(target()->extra_ic_state()));
IC::set_target(code);
}
@@ -659,6 +705,10 @@
static void Clear(Address address, Code* target);
+ StubKind GetStubKind(Handle<JSObject> receiver,
+ Handle<Object> key,
+ Handle<Object> value);
+
friend class IC;
};
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 7d4bbbc..11e2217 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -36,13 +36,6 @@
};
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType {
- OUT_OF_MEMORY,
- TERMINATION
-};
-
-
// Invalid depth in prototype chain.
const int kInvalidProtoDepth = -1;
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 259df21..cc11235 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -321,7 +321,7 @@
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element;
+ has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
__ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
@@ -417,14 +417,16 @@
__ mov(t3, sp);
__ bind(&loop);
__ lw(a2, MemOperand(t3));
- __ Addu(t3, t3, kPointerSize);
if (FLAG_smi_only_arrays) {
__ JumpIfNotSmi(a2, &has_non_smi_element);
}
+ __ Addu(t3, t3, kPointerSize);
__ Addu(t1, t1, -kPointerSize);
__ sw(a2, MemOperand(t1));
__ bind(&entry);
__ Branch(&loop, lt, t0, Operand(t1));
+
+ __ bind(&finish);
__ mov(sp, t3);
// Remove caller arguments and receiver from the stack, setup return value and
@@ -437,8 +439,39 @@
__ Ret();
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(
+ a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(a3, t0);
- __ b(call_generic_code);
+ __ Branch(call_generic_code);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // a3: JSArray
+ __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ a2,
+ t5,
+ &cant_transition_map);
+ __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ RecordWriteField(a3,
+ HeapObject::kMapOffset,
+ a2,
+ t5,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ Label loop2;
+ __ bind(&loop2);
+ __ lw(a2, MemOperand(t3));
+ __ Addu(t3, t3, kPointerSize);
+ __ Subu(t1, t1, kPointerSize);
+ __ sw(a2, MemOperand(t1));
+ __ Branch(&loop2, lt, t0, Operand(t1));
+ __ Branch(&finish);
}
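
Both the ARM hunk earlier and this MIPS hunk give ArrayNativeCode the same recovery plan when the smi-only initializer loop meets a non-smi argument: a heap number bails to the generic builtin (it would need a double store), any other heap object transitions the freshly allocated array's map from FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS and resumes copying in loop2, and a failed transition lookup undoes the allocation. A rough C++ rendering of that control flow over a toy value type:

    #include <cassert>
    #include <vector>

    enum Kind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };
    struct Value { bool is_smi; bool is_heap_number; };

    // Returns false when the generic builtin must take over (the stub's
    // call_generic_code exit); true when the array was fully initialized.
    bool ArrayNativeCode(const std::vector<Value>& args,
                         std::vector<Value>* elements, Kind* kind) {
      *kind = FAST_SMI_ONLY_ELEMENTS;
      for (size_t i = 0; i < args.size(); ++i) {
        if (*kind == FAST_SMI_ONLY_ELEMENTS && !args[i].is_smi) {
          if (args[i].is_heap_number) return false;  // runtime handles doubles
          *kind = FAST_ELEMENTS;  // transition once, keep copying (loop2)
        }
        elements->push_back(args[i]);
      }
      return true;
    }

    int main() {
      std::vector<Value> args = {{true, false}, {false, false}};
      std::vector<Value> out;
      Kind kind;
      assert(ArrayNativeCode(args, &out, &kind) && kind == FAST_ELEMENTS);
      return 0;
    }
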
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 852b3c9..e65886b 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -3832,17 +3832,6 @@
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- __ Throw(v0);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, v0);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -4033,13 +4022,27 @@
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ li(a0, Operand(false, RelocInfo::NONE));
+ __ li(a2, Operand(external_caught));
+ __ sw(a0, MemOperand(a2));
+
+ // Set pending exception and v0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
+ __ sw(v0, MemOperand(a2));
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(v0);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(v0);
}
@@ -5133,10 +5136,10 @@
Label termination_exception;
__ Branch(&termination_exception, eq, v0, Operand(a0));
- __ Throw(v0); // Expects thrown value in v0.
+ __ Throw(v0);
__ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
+ __ ThrowUncatchable(v0);
__ bind(&failure);
// For failure and exception return null.
@@ -6058,25 +6061,23 @@
// Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
// safe in this case.
- __ UntagAndJumpIfSmi(a2, a2, &runtime);
- __ UntagAndJumpIfSmi(a3, a3, &runtime);
-
+ __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
+ __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
// Both a2 and a3 are untagged integers.
__ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
- __ subu(a2, t5, a3);
- __ Branch(&runtime, gt, a3, Operand(t5)); // Fail if from > to.
+ __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
+ __ Subu(a2, a2, a3);
// Make sure first argument is a string.
__ lw(v0, MemOperand(sp, kStringOffset));
- __ Branch(&runtime, eq, v0, Operand(kSmiTagMask));
-
+ __ JumpIfSmi(v0, &runtime);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
- __ And(t4, v0, Operand(kIsNotStringMask));
+ __ And(t0, a1, Operand(kIsNotStringMask));
- __ Branch(&runtime, ne, t4, Operand(zero_reg));
+ __ Branch(&runtime, ne, t0, Operand(zero_reg));
// Short-cut for the case of trivial substring.
Label return_v0;
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 201742e..7adc404 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -149,6 +149,27 @@
}
#endif
+ // We can optionally optimize based on counters rather than statistical
+ // sampling.
+ if (info->ShouldSelfOptimize()) {
+ if (FLAG_trace_opt) {
+ PrintF("[adding self-optimization header to %s]\n",
+ *info->function()->debug_name()->ToCString());
+ }
+ MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
+ Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
+ JSGlobalPropertyCell* cell;
+ if (maybe_cell->To(&cell)) {
+ __ li(a2, Handle<JSGlobalPropertyCell>(cell));
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+ __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ Handle<Code> compile_stub(
+ isolate()->builtins()->builtin(Builtins::kLazyRecompile));
+ __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
+ }
+ }
+
// Strict mode functions and builtins need to replace the receiver
// with undefined when called as functions (without an explicit
// receiver object). t1 is zero for method calls and non-zero for
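
The header emitted in the hunk above gives each compiled function a private counter cell, decrements it on every entry, and tail-jumps to the LazyRecompile builtin when the counter reaches zero, so optimization can be driven by call counts instead of the profiler's statistical sampling. The mechanism, reduced to plain C++ (code patching replaced by a flag; all names invented for the demo):

    #include <cstdio>

    const int kCallsUntilPrimitiveOpt = 3;  // small value for the demo

    struct Function {
      int counter;
      bool optimized;
    };

    // Stand-in for Builtins::kLazyRecompile: swap in optimized code.
    void LazyRecompile(Function* f) { f->optimized = true; }

    // The self-optimization header: decrement the counter cell on entry
    // and trigger recompilation once it hits zero.
    void Enter(Function* f) {
      if (!f->optimized && --f->counter == 0) LazyRecompile(f);
    }

    int main() {
      Function f = {kCallsUntilPrimitiveOpt, false};
      for (int i = 0; i < 5; ++i) {
        Enter(&f);
        std::printf("call %d: optimized=%d\n", i + 1, f.optimized);
      }
      return 0;
    }
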
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index aead65c..34e1a82 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -4334,12 +4334,12 @@
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
-
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+ // Set up the parameters to the stub/runtime call.
+ __ LoadHeapObject(t0, literals);
__ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ li(a2, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
@@ -4348,7 +4348,7 @@
__ li(a1, Operand(Smi::FromInt(flags)));
__ Push(t0, a3, a2, a1);
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index f4e043a..7a733bc 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -2679,8 +2679,7 @@
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -2690,24 +2689,9 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in v0.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- li(a0, Operand(false, RelocInfo::NONE));
- li(a2, Operand(external_caught));
- sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- sw(v0, MemOperand(a2));
- } else if (!value.is(v0)) {
+ if (!value.is(v0)) {
mov(v0, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
lw(sp, MemOperand(a3));
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 69b3f9d..56a3433 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -871,12 +871,12 @@
// Must preserve the result register.
void PopTryHandler();
- // Passes thrown value (in v0) to the handler of top of the try handler chain.
+ // Passes thrown value to the handler of top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ void ThrowUncatchable(Register value);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 3a667a4..628c61f 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -280,7 +280,9 @@
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
- ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()),
+ ASSERT_EQ((map()->has_fast_elements() ||
+ map()->has_fast_smi_only_elements() ||
+ (elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index a5ea659..a5aa5a8 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1339,11 +1339,12 @@
}
}
ASSERT((map()->has_fast_elements() ||
- map()->has_fast_smi_only_elements()) ==
+ map()->has_fast_smi_only_elements() ||
+ (value == GetHeap()->empty_fixed_array())) ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
- ASSERT(map()->has_fast_double_elements() ==
- value->IsFixedDoubleArray());
+ ASSERT((value == GetHeap()->empty_fixed_array()) ||
+ (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}
@@ -2052,16 +2053,6 @@
}
-void DescriptorArray::CopyFrom(int index,
- DescriptorArray* src,
- int src_index,
- const WhitenessWitness& witness) {
- Descriptor desc;
- src->Get(src_index, &desc);
- Set(index, &desc, witness);
-}
-
-
void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
int first, int second) {
NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
diff --git a/src/objects.cc b/src/objects.cc
index aef0284..1e90868 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -3379,12 +3379,10 @@
} else {
property_count += 2; // Make space for two more properties.
}
- Object* obj;
- { MaybeObject* maybe_obj =
- StringDictionary::Allocate(property_count);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ StringDictionary* dictionary;
+ { MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
}
- StringDictionary* dictionary = StringDictionary::cast(obj);
DescriptorArray* descs = map_of_this->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -3394,36 +3392,31 @@
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = descs->GetConstantFunction(i);
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
+ MaybeObject* maybe_dictionary =
+ dictionary->Add(descs->GetKey(i), value, d);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
case FIELD: {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
Object* value = FastPropertyAt(descs->GetFieldIndex(i));
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- dictionary = StringDictionary::cast(result);
+ MaybeObject* maybe_dictionary =
+ dictionary->Add(descs->GetKey(i), value, d);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
case CALLBACKS: {
- PropertyDetails d =
- PropertyDetails(details.attributes(), CALLBACKS, details.index());
+ if (!descs->IsProperty(i)) break;
Object* value = descs->GetCallbacksObject(i);
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (value->IsAccessorPair()) {
+ MaybeObject* maybe_copy =
+ AccessorPair::cast(value)->CopyWithoutTransitions();
+ if (!maybe_copy->To(&value)) return maybe_copy;
}
- dictionary = StringDictionary::cast(result);
+ MaybeObject* maybe_dictionary =
+ dictionary->Add(descs->GetKey(i), value, details);
+ if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
case MAP_TRANSITION:
@@ -3445,12 +3438,12 @@
int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index);
- { MaybeObject* maybe_obj =
+ Map* new_map;
+ { MaybeObject* maybe_map =
current_heap->isolate()->context()->global_context()->
normalized_map_cache()->Get(this, mode);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ if (!maybe_map->To(&new_map)) return maybe_map;
}
- Map* new_map = Map::cast(obj);
// We have now successfully allocated all the necessary objects.
// Changes can now be made with the guarantee that all of them take effect.
@@ -5735,6 +5728,33 @@
}
+void DescriptorArray::CopyFrom(Handle<DescriptorArray> dst,
+ int dst_index,
+ Handle<DescriptorArray> src,
+ int src_index,
+ const WhitenessWitness& witness) {
+ CALL_HEAP_FUNCTION_VOID(dst->GetIsolate(),
+ dst->CopyFrom(dst_index, *src, src_index, witness));
+}
+
+
+MaybeObject* DescriptorArray::CopyFrom(int dst_index,
+ DescriptorArray* src,
+ int src_index,
+ const WhitenessWitness& witness) {
+ Object* value = src->GetValue(src_index);
+ PropertyDetails details(src->GetDetails(src_index));
+ if (details.type() == CALLBACKS && value->IsAccessorPair()) {
+ MaybeObject* maybe_copy =
+ AccessorPair::cast(value)->CopyWithoutTransitions();
+ if (!maybe_copy->To(&value)) return maybe_copy;
+ }
+ Descriptor desc(src->GetKey(src_index), value, details);
+ Set(dst_index, &desc, witness);
+ return this;
+}
+
+
MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
TransitionFlag transition_flag) {
// Transitions are only kept when inserting another transition.
@@ -5818,7 +5838,9 @@
} else {
if (!(IsNullDescriptor(from_index) ||
(remove_transitions && IsTransitionOnly(from_index)))) {
- new_descriptors->CopyFrom(to_index++, this, from_index, witness);
+ MaybeObject* copy_result =
+ new_descriptors->CopyFrom(to_index++, this, from_index, witness);
+ if (copy_result->IsFailure()) return copy_result;
}
from_index++;
}
@@ -5858,7 +5880,9 @@
int next_descriptor = 0;
for (int i = 0; i < number_of_descriptors(); i++) {
if (IsProperty(i)) {
- new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+ MaybeObject* copy_result =
+ new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+ if (copy_result->IsFailure()) return copy_result;
}
}
ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
@@ -5971,6 +5995,18 @@
}
+MaybeObject* AccessorPair::CopyWithoutTransitions() {
+ Heap* heap = GetHeap();
+ AccessorPair* copy;
+ { MaybeObject* maybe_copy = heap->AllocateAccessorPair();
+ if (!maybe_copy->To(&copy)) return maybe_copy;
+ }
+ copy->set_getter(getter()->IsMap() ? heap->the_hole_value() : getter());
+ copy->set_setter(setter()->IsMap() ? heap->the_hole_value() : setter());
+ return copy;
+}
+
+
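+ CopyWithoutTransitions exists because an AccessorPair's getter or setter slot can hold a Map (a stored transition) rather than a real accessor, and such transitions must not leak into normalized copies, so Map-valued slots are replaced with the hole. A toy model of the filtering copy:

    #include <cassert>
    #include <string>

    // Toy object model: a slot is either empty (the hole), a callable
    // accessor, or a map transition that must not survive a copy.
    struct Slot {
      enum Kind { HOLE, ACCESSOR, MAP_TRANSITION };
      Kind kind;
      std::string name;
    };

    struct AccessorPair {
      Slot getter, setter;
    };

    AccessorPair CopyWithoutTransitions(const AccessorPair& src) {
      Slot hole = {Slot::HOLE, std::string()};
      AccessorPair copy = src;
      if (copy.getter.kind == Slot::MAP_TRANSITION) copy.getter = hole;
      if (copy.setter.kind == Slot::MAP_TRANSITION) copy.setter = hole;
      return copy;
    }

    int main() {
      AccessorPair p = {{Slot::ACCESSOR, "get x"}, {Slot::MAP_TRANSITION, "Map*"}};
      AccessorPair c = CopyWithoutTransitions(p);
      assert(c.getter.kind == Slot::ACCESSOR && c.setter.kind == Slot::HOLE);
      return 0;
    }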
MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
PretenureFlag pretenure) {
ASSERT(deopt_entry_count > 0);
@@ -11278,6 +11314,9 @@
template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
AtPut(uint32_t, Object*);
+template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ SlowReverseLookup(Object* value);
+
template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
SlowReverseLookup(Object* value);
diff --git a/src/objects.h b/src/objects.h
index bc18bf8..cdbb31a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -168,6 +168,11 @@
ALLOW_ELEMENT_TRANSITION_MAPS
};
+enum KeyedAccessGrowMode {
+ DO_NOT_ALLOW_JSARRAY_GROWTH,
+ ALLOW_JSARRAY_GROWTH
+};
+
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
@@ -2429,12 +2434,20 @@
Descriptor* desc,
const WhitenessWitness&);
- // Transfer complete descriptor from another descriptor array to
- // this one.
- inline void CopyFrom(int index,
- DescriptorArray* src,
+ // Transfer a complete descriptor from the src descriptor array to the dst
+ // one, dropping map transitions in CALLBACKS.
+ static void CopyFrom(Handle<DescriptorArray> dst,
+ int dst_index,
+ Handle<DescriptorArray> src,
int src_index,
- const WhitenessWitness&);
+ const WhitenessWitness& witness);
+
+ // Transfer a complete descriptor from the src descriptor array to this
+ // descriptor array, dropping map transitions in CALLBACKS.
+ MUST_USE_RESULT MaybeObject* CopyFrom(int dst_index,
+ DescriptorArray* src,
+ int src_index,
+ const WhitenessWitness&);
// Copy the descriptor array, insert a new descriptor and optionally
// remove map transitions. If the descriptor is already present, it is
@@ -4216,6 +4229,28 @@
// Find the first map in an IC stub.
Map* FindFirstMap();
+ class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
+ class ExtraICStateKeyedAccessGrowMode:
+ public BitField<KeyedAccessGrowMode, 1, 1> {}; // NOLINT
+
+ static const int kExtraICStateGrowModeShift = 1;
+
+ static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
+ return ExtraICStateStrictMode::decode(extra_ic_state);
+ }
+
+ static inline KeyedAccessGrowMode GetKeyedAccessGrowMode(
+ ExtraICState extra_ic_state) {
+ return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state);
+ }
+
+ static inline ExtraICState ComputeExtraICState(
+ KeyedAccessGrowMode grow_mode,
+ StrictModeFlag strict_mode) {
+ return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) |
+ ExtraICStateStrictMode::encode(strict_mode);
+ }
+
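+ The extra IC state is now a two-bit record: bit 0 carries the strict-mode flag and bit 1 the keyed-access grow mode (kExtraICStateGrowModeShift == 1), with ComputeExtraICState packing and the two getters unpacking. The same packing written with plain shifts instead of V8's BitField template:

    #include <cassert>

    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
    enum KeyedAccessGrowMode {
      DO_NOT_ALLOW_JSARRAY_GROWTH = 0,
      ALLOW_JSARRAY_GROWTH = 1
    };
    typedef int ExtraICState;

    // Bit 0: strict mode; bit 1: grow mode.
    ExtraICState ComputeExtraICState(KeyedAccessGrowMode grow_mode,
                                     StrictModeFlag strict_mode) {
      return (grow_mode << 1) | strict_mode;
    }

    StrictModeFlag GetStrictMode(ExtraICState state) {
      return static_cast<StrictModeFlag>(state & 1);
    }

    KeyedAccessGrowMode GetKeyedAccessGrowMode(ExtraICState state) {
      return static_cast<KeyedAccessGrowMode>((state >> 1) & 1);
    }

    int main() {
      ExtraICState s = ComputeExtraICState(ALLOW_JSARRAY_GROWTH, kStrictMode);
      assert(GetStrictMode(s) == kStrictMode);
      assert(GetKeyedAccessGrowMode(s) == ALLOW_JSARRAY_GROWTH);
      return 0;
    }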
// Flags operations.
static inline Flags ComputeFlags(
Kind kind,
@@ -7755,6 +7790,8 @@
static inline AccessorPair* cast(Object* obj);
+ MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
+
#ifdef OBJECT_PRINT
void AccessorPairPrint(FILE* out = stdout);
#endif
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index c6e6131..8b645de 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1208,11 +1208,13 @@
template <> struct SnapshotSizeConstants<4> {
static const int kExpectedHeapGraphEdgeSize = 12;
static const int kExpectedHeapEntrySize = 36;
+ static const int kMaxSerializableSnapshotRawSize = 256 * MB;
};
template <> struct SnapshotSizeConstants<8> {
static const int kExpectedHeapGraphEdgeSize = 24;
static const int kExpectedHeapEntrySize = 48;
+ static const int kMaxSerializableSnapshotRawSize = 768 * MB;
};
} // namespace
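
Splitting the cap by word size means 64-bit builds, whose heap entries are larger, get a 768 MB serialization budget while 32-bit builds keep 256 MB; the serializer further down now reads the constant through SnapshotSizeConstants<kPointerSize>. The selection mechanism as a small compilable sketch (sizeof(void*) standing in for kPointerSize):

    #include <cstddef>
    #include <cstdio>

    const size_t MB = 1024 * 1024;

    // Specialized per pointer size, mirroring the patch.
    template <size_t pointer_size> struct SnapshotSizeConstants;
    template <> struct SnapshotSizeConstants<4> {
      static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
    };
    template <> struct SnapshotSizeConstants<8> {
      static const size_t kMaxSerializableSnapshotRawSize = 768 * MB;
    };

    int main() {
      size_t limit =
          SnapshotSizeConstants<sizeof(void*)>::kMaxSerializableSnapshotRawSize;
      std::printf("max serializable snapshot: %zu MB\n", limit / MB);
      return 0;
    }
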
@@ -1775,9 +1777,11 @@
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
+ const char* name = shared->bound() ? "native_bind" :
+ collection_->names()->GetName(String::cast(shared->name()));
return AddEntry(object,
HeapEntry::kClosure,
- collection_->names()->GetName(String::cast(shared->name())),
+ name,
children_count,
retainers_count);
} else if (object->IsJSRegExp()) {
@@ -2011,19 +2015,22 @@
heap_->prototype_symbol(), js_fun->prototype());
}
}
+ SharedFunctionInfo* shared_info = js_fun->shared();
+ // JSFunction has either bindings or literals and never both.
+ bool bound = shared_info->bound();
+ TagObject(js_fun->literals_or_bindings(),
+ bound ? "(function bindings)" : "(function literals)");
SetInternalReference(js_fun, entry,
- "shared", js_fun->shared(),
+ bound ? "bindings" : "literals",
+ js_fun->literals_or_bindings(),
+ JSFunction::kLiteralsOffset);
+ SetInternalReference(js_fun, entry,
+ "shared", shared_info,
JSFunction::kSharedFunctionInfoOffset);
TagObject(js_fun->unchecked_context(), "(context)");
SetInternalReference(js_fun, entry,
"context", js_fun->unchecked_context(),
JSFunction::kContextOffset);
- TagObject(js_fun->literals_or_bindings(),
- "(function literals_or_bindings)");
- SetInternalReference(js_fun, entry,
- "literals_or_bindings",
- js_fun->literals_or_bindings(),
- JSFunction::kLiteralsOffset);
for (int i = JSFunction::kNonWeakFieldsEndOffset;
i < JSFunction::kSize;
i += kPointerSize) {
@@ -2126,17 +2133,6 @@
SetInternalReference(obj, entry,
"line_ends", script->line_ends(),
Script::kLineEndsOffset);
- } else if (obj->IsDescriptorArray()) {
- DescriptorArray* desc_array = DescriptorArray::cast(obj);
- if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
- Object* content_array =
- desc_array->get(DescriptorArray::kContentArrayIndex);
- TagObject(content_array, "(map descriptor content)");
- SetInternalReference(obj, entry,
- "content", content_array,
- FixedArray::OffsetOfElementAt(
- DescriptorArray::kContentArrayIndex));
- }
} else if (obj->IsCodeCache()) {
CodeCache* code_cache = CodeCache::cast(obj);
TagObject(code_cache->default_cache(), "(default code cache)");
@@ -2162,11 +2158,27 @@
void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
HeapEntry* entry) {
- if (js_obj->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(js_obj);
- Context* context = func->context();
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+ if (!js_obj->IsJSFunction()) return;
+ JSFunction* func = JSFunction::cast(js_obj);
+ Context* context = func->context();
+ ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+
+ if (func->shared()->bound()) {
+ FixedArray* bindings = func->function_bindings();
+ SetNativeBindReference(js_obj, entry, "bound_this",
+ bindings->get(JSFunction::kBoundThisIndex));
+ SetNativeBindReference(js_obj, entry, "bound_function",
+ bindings->get(JSFunction::kBoundFunctionIndex));
+ for (int i = JSFunction::kBoundArgumentsStartIndex;
+ i < bindings->length(); i++) {
+ const char* reference_name = collection_->names()->GetFormatted(
+ "bound_argument_%d",
+ i - JSFunction::kBoundArgumentsStartIndex);
+ SetNativeBindReference(js_obj, entry, reference_name,
+ bindings->get(i));
+ }
+ } else {
// Add context allocated locals.
int context_locals = scope_info->ContextLocalCount();
for (int i = 0; i < context_locals; ++i) {
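
For a bound function, literals_or_bindings holds a bindings array rather than a literals array, and the explorer above now emits named shortcut edges for the bound this, the bound target function, and each bound argument. A toy layout matching that loop; the concrete index values 0/1/2 are assumptions for illustration, and the code relies only on the argument slots starting at kBoundArgumentsStartIndex:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Assumed slot layout of a bound function's bindings array.
    const int kBoundFunctionIndex = 0;
    const int kBoundThisIndex = 1;
    const int kBoundArgumentsStartIndex = 2;

    int main() {
      std::vector<std::string> bindings = {"target_fn", "this_obj",
                                           "arg_a", "arg_b"};
      std::printf("bound_function -> %s\n", bindings[kBoundFunctionIndex].c_str());
      std::printf("bound_this -> %s\n", bindings[kBoundThisIndex].c_str());
      for (size_t i = kBoundArgumentsStartIndex; i < bindings.size(); ++i) {
        std::printf("bound_argument_%zu -> %s\n",
                    i - kBoundArgumentsStartIndex, bindings[i].c_str());
      }
      return 0;
    }
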
@@ -2444,6 +2456,22 @@
}
+void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ const char* reference_name,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kShortcut,
+ parent_obj,
+ parent_entry,
+ reference_name,
+ child_obj,
+ child_entry);
+ }
+}
+
+
void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
int index,
@@ -2617,7 +2645,6 @@
!obj->IsOddball() &&
obj != heap_->raw_unchecked_empty_byte_array() &&
obj != heap_->raw_unchecked_empty_fixed_array() &&
- obj != heap_->raw_unchecked_empty_fixed_double_array() &&
obj != heap_->raw_unchecked_empty_descriptor_array()) {
objects_tags_.SetTag(obj, tag);
}
@@ -3411,15 +3438,13 @@
bool aborted_;
};
-const int HeapSnapshotJSONSerializer::kMaxSerializableSnapshotRawSize =
- 256 * MB;
-
void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
ASSERT(writer_ == NULL);
writer_ = new OutputStreamWriter(stream);
HeapSnapshot* original_snapshot = NULL;
- if (snapshot_->raw_entries_size() >= kMaxSerializableSnapshotRawSize) {
+ if (snapshot_->raw_entries_size() >=
+ SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
// The snapshot is too big. Serialize a fake snapshot.
original_snapshot = snapshot_;
snapshot_ = CreateFakeSnapshot();
@@ -3446,8 +3471,14 @@
snapshot_->uid());
result->AllocateEntries(2, 1, 0);
HeapEntry* root = result->AddRootEntry(1);
+ const char* text = snapshot_->collection()->names()->GetFormatted(
+ "The snapshot is too big. "
+ "Maximum snapshot size is %d MB. "
+ "Actual snapshot size is %d MB.",
+ SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
+ (snapshot_->raw_entries_size() + MB - 1) / MB);
HeapEntry* message = result->AddEntry(
- HeapEntry::kString, "The snapshot is too big", 0, 4, 0, 0);
+ HeapEntry::kString, text, 0, 4, 0, 0);
root->SetUnidirElementReference(0, 1, message);
result->SetDominatorsToSelf();
return result;
diff --git a/src/profile-generator.h b/src/profile-generator.h
index a0dea58..a24f9a9 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -964,6 +964,10 @@
HeapEntry* parent,
String* reference_name,
Object* child);
+ void SetNativeBindReference(HeapObject* parent_obj,
+ HeapEntry* parent,
+ const char* reference_name,
+ Object* child);
void SetElementReference(HeapObject* parent_obj,
HeapEntry* parent,
int index,
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index c7f4f94..4bbfe17 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -397,12 +397,16 @@
Handle<JSObject> receiver,
KeyedIC::StubKind stub_kind,
StrictModeFlag strict_mode) {
+ KeyedAccessGrowMode grow_mode =
+ KeyedIC::GetGrowModeFromStubKind(stub_kind);
+ Code::ExtraICState extra_state =
+ Code::ComputeExtraICState(grow_mode, strict_mode);
Code::Flags flags =
Code::ComputeMonomorphicFlags(
stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
: Code::KEYED_STORE_IC,
NORMAL,
- strict_mode);
+ extra_state);
Handle<String> name;
switch (stub_kind) {
case KeyedIC::LOAD:
@@ -411,6 +415,9 @@
case KeyedIC::STORE_NO_TRANSITION:
name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
break;
+ case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
+ name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol();
+ break;
default:
UNREACHABLE();
break;
@@ -426,8 +433,15 @@
code = compiler.CompileLoadElement(receiver_map);
break;
}
+ case KeyedIC::STORE_AND_GROW_NO_TRANSITION: {
+ KeyedStoreStubCompiler compiler(isolate_, strict_mode,
+ ALLOW_JSARRAY_GROWTH);
+ code = compiler.CompileStoreElement(receiver_map);
+ break;
+ }
case KeyedIC::STORE_NO_TRANSITION: {
- KeyedStoreStubCompiler compiler(isolate_, strict_mode);
+ KeyedStoreStubCompiler compiler(isolate_, strict_mode,
+ DO_NOT_ALLOW_JSARRAY_GROWTH);
code = compiler.CompileStoreElement(receiver_map);
break;
}
@@ -519,7 +533,8 @@
Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
- KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode,
+ DO_NOT_ALLOW_JSARRAY_GROWTH);
Handle<Code> code =
compiler.CompileStoreField(receiver, field_index, transition, name);
PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
@@ -1349,8 +1364,10 @@
Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
Handle<String> name,
InlineCacheState state) {
+ Code::ExtraICState extra_state =
+ Code::ComputeExtraICState(grow_mode_, strict_mode_);
Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type);
+ Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 398d9f4..fdd8a12 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -670,8 +670,12 @@
class KeyedStoreStubCompiler: public StubCompiler {
public:
- KeyedStoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
- : StubCompiler(isolate), strict_mode_(strict_mode) { }
+ KeyedStoreStubCompiler(Isolate* isolate,
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode)
+ : StubCompiler(isolate),
+ strict_mode_(strict_mode),
+ grow_mode_(grow_mode) { }
Handle<Code> CompileStoreField(Handle<JSObject> object,
int index,
@@ -686,10 +690,12 @@
static void GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array,
- ElementsKind element_kind);
+ ElementsKind element_kind,
+ KeyedAccessGrowMode grow_mode);
static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
- bool is_js_array);
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode);
static void GenerateStoreExternalArray(MacroAssembler* masm,
ElementsKind elements_kind);
@@ -702,6 +708,7 @@
InlineCacheState state = MONOMORPHIC);
StrictModeFlag strict_mode_;
+ KeyedAccessGrowMode grow_mode_;
};
diff --git a/src/type-info.cc b/src/type-info.cc
index e663998..0b6e08f 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -110,7 +110,11 @@
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
+ bool allow_growth =
+ Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
+ ALLOW_JSARRAY_GROWTH;
return code->is_keyed_store_stub() &&
+ !allow_growth &&
code->ic_state() == MONOMORPHIC &&
Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
code->FindFirstMap() != NULL &&
@@ -125,7 +129,11 @@
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
Builtins* builtins = isolate_->builtins();
+ bool allow_growth =
+ Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
+ ALLOW_JSARRAY_GROWTH;
return code->is_keyed_store_stub() &&
+ !allow_growth &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
*code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
code->ic_state() == MEGAMORPHIC;
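
Both oracle predicates now refuse to treat a growing keyed-store stub as a source of type feedback, presumably because a store that grows the array says little about the receiver's steady-state element kind. A minimal model of the added guard (CodeStub is a stand-in, not V8's Code):

    #include <cassert>

    enum KeyedAccessGrowMode { DO_NOT_ALLOW_JSARRAY_GROWTH, ALLOW_JSARRAY_GROWTH };

    struct CodeStub {
      bool is_keyed_store;
      bool monomorphic;
      KeyedAccessGrowMode grow_mode;
    };

    bool IsStoreMonomorphic(const CodeStub& code) {
      bool allow_growth = code.grow_mode == ALLOW_JSARRAY_GROWTH;
      return code.is_keyed_store && !allow_growth && code.monomorphic;
    }

    int main() {
      CodeStub grow_stub = {true, true, ALLOW_JSARRAY_GROWTH};
      assert(!IsStoreMonomorphic(grow_stub));  // growth disqualifies feedback
      return 0;
    }
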
diff --git a/src/version.cc b/src/version.cc
index 03b16ce..89ad901 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
-#define BUILD_NUMBER 5
+#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index d9361fd..2ea68b3 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1160,7 +1160,7 @@
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
- has_non_smi_element;
+ has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments.
__ testq(rax, rax);
@@ -1265,11 +1265,11 @@
__ movq(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(kScratchRegister, &has_non_smi_element);
+ __ JumpIfNotSmi(r8, &has_non_smi_element);
}
- __ movq(Operand(rdx, 0), kScratchRegister);
+ __ movq(Operand(rdx, 0), r8);
__ addq(rdx, Immediate(kPointerSize));
__ bind(&entry);
__ decq(rcx);
@@ -1280,6 +1280,7 @@
// rbx: JSArray
// esp[0]: return address
// esp[8]: last argument
+ __ bind(&finish);
__ pop(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
@@ -1287,8 +1288,38 @@
__ ret(0);
__ bind(&has_non_smi_element);
+ // Double values are handled by the runtime.
+ __ CheckMap(r8,
+ masm->isolate()->factory()->heap_number_map(),
+ &not_double,
+ DONT_DO_SMI_CHECK);
+ __ bind(&cant_transition_map);
__ UndoAllocationInNewSpace(rbx);
__ jmp(call_generic_code);
+
+ __ bind(&not_double);
+ // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+ // rbx: JSArray
+ __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ r11,
+ kScratchRegister,
+ &cant_transition_map);
+
+ __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
+ __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
+ kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Finish the array initialization loop.
+ Label loop2;
+ __ bind(&loop2);
+ __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
+ __ movq(Operand(rdx, 0), r8);
+ __ addq(rdx, Immediate(kPointerSize));
+ __ decq(rcx);
+ __ j(greater_equal, &loop2);
+ __ jmp(&finish);
}
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 9feef08..559379b 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -3054,7 +3054,7 @@
__ Throw(rax);
__ bind(&termination_exception);
- __ ThrowUncatchable(TERMINATION, rax);
+ __ ThrowUncatchable(rax);
// External string. Short external strings have already been ruled out.
// rdi: subject string (expected to be external)
@@ -3775,12 +3775,6 @@
}
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Throw exception in eax.
- __ Throw(rax);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3921,12 +3915,6 @@
}
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- __ ThrowUncatchable(type, rax);
-}
-
-
void CEntryStub::Generate(MacroAssembler* masm) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
@@ -3990,13 +3978,25 @@
true);
__ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+ // Set external caught exception to false.
+ Isolate* isolate = masm->isolate();
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate);
+ __ Set(rax, static_cast<int64_t>(false));
+ __ Store(external_caught, rax);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+ isolate);
+ __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ __ Store(pending_exception, rax);
+ // Fall through to the next label.
__ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
+ __ ThrowUncatchable(rax);
__ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
+ __ Throw(rax);
}
@@ -5966,11 +5966,13 @@
// KeyedStoreIC::GenerateGeneric.
{ rbx, rdx, rcx, EMIT_REMEMBERED_SET},
// KeyedStoreStubCompiler::GenerateStoreFastElement.
- { rdi, rdx, rcx, EMIT_REMEMBERED_SET},
+ { rdi, rbx, rcx, EMIT_REMEMBERED_SET},
+ { rdx, rdi, rbx, EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ rdx, rbx, rdi, EMIT_REMEMBERED_SET},
+ { rdx, rbx, rdi, OMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ rdx, r11, r15, EMIT_REMEMBERED_SET},
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index f7e8fc1..8947f70 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -182,12 +182,17 @@
// -- rsp[0] : return address
// -----------------------------------
// The fail label is not actually used since we do not allocate.
- Label allocated, cow_array;
+ Label allocated, cow_array, only_change_map, done;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+ __ j(equal, &only_change_map);
// Check backing store for COW-ness. If the negative case, we do not have to
// allocate a new array, since FixedArray and FixedDoubleArray do not differ
// in size.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
__ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
@@ -241,6 +246,18 @@
__ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
__ jmp(&allocated);
+ __ bind(&only_change_map);
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&done);
+
// Conversion loop.
__ bind(&loop);
__ movq(rbx,
@@ -264,6 +281,8 @@
__ bind(&entry);
__ decq(r9);
__ j(not_sign, &loop);
+
+ __ bind(&done);
}
@@ -276,7 +295,14 @@
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label loop, entry, convert_hole, gc_required;
+ Label loop, entry, convert_hole, gc_required, only_change_map;
+
+ // Check for empty arrays, which only require a map transition and no changes
+ // to the backing store.
+ __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+ __ j(equal, &only_change_map);
+
__ push(rax);
__ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -345,15 +371,6 @@
__ decq(r9);
__ j(not_sign, &loop);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray.
__ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
__ RecordWriteField(rdx,
@@ -365,6 +382,17 @@
OMIT_SMI_CHECK);
__ pop(rax);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ __ bind(&only_change_map);
+ // Set transitioned map.
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
+ __ RecordWriteField(rdx,
+ HeapObject::kMapOffset,
+ rbx,
+ rdi,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
}
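
Both transition generators in this file now short-circuit when the receiver's backing store is the canonical empty fixed array: the empty array is shared and valid for any elements kind, so only the receiver's map word has to change and the conversion loop is skipped entirely. The fast path as a toy sketch:

    #include <cassert>
    #include <vector>

    struct Map { int elements_kind; };
    struct JSObject {
      Map* map;
      const std::vector<double>* elements;  // toy backing store
    };

    // Shared, immutable stand-in for Heap::empty_fixed_array().
    static const std::vector<double> kEmptyFixedArray;

    // Transitioning an object whose backing store is the canonical empty
    // array needs no allocation or element conversion: swap the map, done.
    void TransitionElements(JSObject* obj, Map* transitioned_map) {
      if (obj->elements == &kEmptyFixedArray) {
        obj->map = transitioned_map;  // the only_change_map fast path
        return;
      }
      // ... otherwise allocate a new backing store and convert each element.
    }

    int main() {
      Map smi_only = {0}, fast_double = {1};
      JSObject o = {&smi_only, &kEmptyFixedArray};
      TransitionElements(&o, &fast_double);
      assert(o.map == &fast_double);
      return 0;
    }
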
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 7c445cb..ab0870b 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -4028,12 +4028,12 @@
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+ Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
+ __ PushHeapObject(literals);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(constant_properties);
int flags = instr->hydrogen()->fast_elements()
@@ -4044,7 +4044,7 @@
: ObjectLiteral::kNoFlags;
__ Push(Smi::FromInt(flags));
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9b5b355..c070fb2 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2552,8 +2552,7 @@
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -2563,22 +2562,9 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in rax.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- Set(rax, static_cast<int64_t>(false));
- Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
- movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- Store(pending_exception, rax);
- } else if (!value.is(rax)) {
+ if (!value.is(rax)) {
movq(rax, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
Load(rsp, handler_address);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 52b5815..aff496f 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -971,7 +971,7 @@
void Throw(Register value);
// Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 6e60993..0e7f206 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1384,19 +1384,19 @@
__ CheckFastSmiOnlyElements(rbx, &call_builtin);
// rdx: receiver
// rbx: map
+ __ movq(r9, rdi); // Backup rdi as it is going to be trashed.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
rbx,
- r10,
+ rdi,
&call_builtin);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+ __ movq(rdi, r9);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(rbx, &call_builtin);
}
- __ CheckFastObjectElements(rbx, &call_builtin);
-
// Save new length.
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
@@ -2441,7 +2441,7 @@
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+ KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
__ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
@@ -3499,14 +3499,16 @@
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, finish_store, grow;
+ Label check_capacity, slow;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3514,23 +3516,31 @@
// Check that the key is a smi.
__ JumpIfNotSmi(rcx, &miss_force_generic);
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(rax, &transition_elements_kind);
+ }
+
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
// Check that the key is within bounds.
if (is_js_array) {
__ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
__ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
}
+ __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &miss_force_generic);
+
+ __ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
__ SmiToInteger32(rcx, rcx);
__ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
@@ -3542,8 +3552,8 @@
FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
__ movq(Operand(rcx, 0), rax);
// Make sure to preserve the value in register rax.
- __ movq(rdx, rax);
- __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs);
+ __ movq(rbx, rax);
+ __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
}
// Done.
@@ -3558,19 +3568,89 @@
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element; anything else
+ // must be handled by the runtime. The flags are already set by the
+ // previous compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Initialize the new FixedArray; slots past index zero are filled with
+ // the hole below.
+ __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+ masm->isolate()->factory()->fixed_array_map());
+ __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
+ Smi::FromInt(JSArray::kPreallocatedArrayElements));
+ __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+ __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
+ }
+
+ // Store the element at index zero.
+ __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
+
+ // Install the new backing store in the JSArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+ __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+ __ ret(0);
+
+ __ bind(&check_capacity);
+ // Check for COW elements; in general they are not handled by this stub.
+ __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+ Heap::kFixedCOWArrayMapRootIndex);
+ __ j(equal, &miss_force_generic);
+
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
+ Smi::FromInt(1));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm,
- bool is_js_array) {
+ bool is_js_array,
+ KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, finish_store;
+ Label grow, slow, check_capacity;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -3584,13 +3664,19 @@
// Check that the key is within bounds.
if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
} else {
__ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
}
- __ j(above_equal, &miss_force_generic);
// Handle smi values specially
+ __ bind(&finish_store);
__ SmiToInteger32(rcx, rcx);
__ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
&transition_elements_kind);
@@ -3607,6 +3693,71 @@
__ Integer32ToSmi(rcx, rcx);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Grow the array by a single element if possible.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element; anything else
+ // must be handled by the runtime. The flags are already set by the
+ // previous compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(rax, &value_is_smi);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
+ __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+ masm->isolate()->factory()->fixed_double_array_map());
+ __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
+ Smi::FromInt(JSArray::kPreallocatedArrayElements));
+
+ // Install the new backing store in the JSArray.
+ __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+ __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // rax: value
+ // rcx: key
+ // rdx: receiver
+ // rdi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
+ Smi::FromInt(1));
+ __ jmp(&finish_store);
+
+ __ bind(&slow);
+ Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ jmp(ic_slow, RelocInfo::CODE_TARGET);
+ }
}
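At the JavaScript level, the two grow paths above make stores at exactly index == length fast for both FixedArray and FixedDoubleArray backing stores. A minimal sketch (assuming mjsunit's assertEquals, as in the test added below; which IC state each store ends up in is illustrative, not guaranteed, especially under stress options):

  function store(a, i, v) { return (a[i] = v); }

  var a = [1, 2, 3];
  store(a, 3, 4);           // first call misses; the IC gets compiled
  a = [1, 2, 3];
  store(a, 3, 4);           // index == length: the grow stub extends by one
  assertEquals(4, a[3]);

  var d = [0.5, 1.5];
  store(d, 2, 2.5);         // double elements take the FixedDoubleArray path
  assertEquals(2.5, d[2]);

  a = [1, 2, 3];
  store(a, 10, 4);          // growing by more than one element bails out to
  assertEquals(4, a[10]);   // the runtime (&slow / miss); semantics unchanged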
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
index ea34a75..ee57d65 100644
--- a/test/cctest/test-deoptimization.cc
+++ b/test/cctest/test-deoptimization.cc
@@ -97,6 +97,14 @@
};
+// Abort any ongoing incremental marking to make sure that all weak global
+// handle callbacks are processed.
+static void NonIncrementalGC() {
+ // TODO(1608): This should use kAbortIncrementalMarking.
+ HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+}
+
+
static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
const char* property_name) {
v8::Local<v8::Function> fun =
@@ -107,9 +115,7 @@
TEST(DeoptimizeSimple) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
// Test lazy deoptimization of a simple function.
{
@@ -119,9 +125,9 @@
"function h() { %DeoptimizeFunction(f); }"
"function g() { count++; h(); }"
"function f() { g(); };"
- "f();"
- "gc(); gc()");
+ "f();");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -135,9 +141,9 @@
"var count = 0;"
"function g() { count++; %DeoptimizeFunction(f); f(false); }"
"function f(x) { if (x) { g(); } else { return } };"
- "f(true);"
- "gc(); gc()");
+ "f(true);");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -147,9 +153,7 @@
TEST(DeoptimizeSimpleWithArguments) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
// Test lazy deoptimization of a simple function with some arguments.
{
@@ -159,9 +163,9 @@
"function h(x) { %DeoptimizeFunction(f); }"
"function g(x, y) { count++; h(x); }"
"function f(x, y, z) { g(1,x); y+z; };"
- "f(1, \"2\", false);"
- "gc(); gc()");
+ "f(1, \"2\", false);");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -176,9 +180,9 @@
"var count = 0;"
"function g(x, y) { count++; %DeoptimizeFunction(f); f(false, 1, y); }"
"function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
- "f(true, 1, \"2\");"
- "gc(); gc()");
+ "f(true, 1, \"2\");");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -188,9 +192,7 @@
TEST(DeoptimizeSimpleNested) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
// Test lazy deoptimization of a simple function. Have a nested function call
// do the deoptimization.
@@ -202,8 +204,8 @@
"function h(x, y, z) { return x + y + z; }"
"function g(z) { count++; %DeoptimizeFunction(f); return z;}"
"function f(x,y,z) { return h(x, y, g(z)); };"
- "result = f(1, 2, 3);"
- "gc(); gc()");
+ "result = f(1, 2, 3);");
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -215,9 +217,7 @@
TEST(DeoptimizeRecursive) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
{
// Test lazy deoptimization of a simple function called recursively. Call
@@ -228,8 +228,9 @@
"var calls = 0;"
"function g() { count++; %DeoptimizeFunction(f); }"
"function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
- "f(10); gc(); gc()");
+ "f(10);");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
@@ -243,9 +244,7 @@
TEST(DeoptimizeMultiple) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -261,9 +260,9 @@
"function f3(x, y, z) { f4(); return x + y + z; };"
"function f2(x, y) { return x + f3(y + 1, y + 1, y + 1) + y; };"
"function f1(x) { return f2(x + 1, x + 1) + x; };"
- "result = f1(1);"
- "gc(); gc()");
+ "result = f1(1);");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -273,9 +272,7 @@
TEST(DeoptimizeConstructor) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -284,9 +281,9 @@
"function g() { count++;"
" %DeoptimizeFunction(f); }"
"function f() { g(); };"
- "result = new f() instanceof f;"
- "gc(); gc()");
+ "result = new f() instanceof f;");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
@@ -301,9 +298,9 @@
" %DeoptimizeFunction(f); }"
"function f(x, y) { this.x = x; g(); this.y = y; };"
"result = new f(1, 2);"
- "result = result.x + result.y;"
- "gc(); gc()");
+ "result = result.x + result.y;");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -313,9 +310,7 @@
TEST(DeoptimizeConstructorMultiple) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
{
AlwaysOptimizeAllowNativesSyntaxNoInlining options;
@@ -332,9 +327,9 @@
"function f2(x, y) {"
" this.result = x + new f3(y + 1, y + 1, y + 1).result + y; };"
"function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
- "result = new f1(1).result;"
- "gc(); gc()");
+ "result = new f1(1).result;");
}
+ NonIncrementalGC();
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -344,9 +339,7 @@
TEST(DeoptimizeBinaryOperationADDString) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
const char* f_source = "function f(x, y) { return x + y; };";
@@ -376,9 +369,9 @@
// Call f and force deoptimization while processing the binary operation.
CompileRun("deopt = true;"
- "var result = f('a+', new X());"
- "gc(); gc();");
+ "var result = f('a+', new X());");
}
+ NonIncrementalGC();
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
@@ -428,18 +421,15 @@
// Call f and force deoptimization while processing the binary operation.
CompileRun("deopt = true;"
- "var result = f(7, new X());"
- "gc(); gc();");
-
+ "var result = f(7, new X());");
+ NonIncrementalGC();
CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
}
TEST(DeoptimizeBinaryOperationADD) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
TestDeoptimizeBinaryOpHelper(&env, "+");
@@ -451,9 +441,7 @@
TEST(DeoptimizeBinaryOperationSUB) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
TestDeoptimizeBinaryOpHelper(&env, "-");
@@ -465,9 +453,7 @@
TEST(DeoptimizeBinaryOperationMUL) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
TestDeoptimizeBinaryOpHelper(&env, "*");
@@ -479,9 +465,7 @@
TEST(DeoptimizeBinaryOperationDIV) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
TestDeoptimizeBinaryOpHelper(&env, "/");
@@ -493,9 +477,7 @@
TEST(DeoptimizeBinaryOperationMOD) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
TestDeoptimizeBinaryOpHelper(&env, "%");
@@ -507,9 +489,7 @@
TEST(DeoptimizeCompare) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
const char* f_source = "function f(x, y) { return x < y; };";
@@ -539,9 +519,9 @@
// Call f and force deoptimization while processing the comparison.
CompileRun("deopt = true;"
- "var result = f('a', new X());"
- "gc(); gc();");
+ "var result = f('a', new X());");
}
+ NonIncrementalGC();
CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
@@ -552,9 +532,7 @@
TEST(DeoptimizeLoadICStoreIC) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
// Functions to generate load/store/keyed load/keyed store IC calls.
const char* f1_source = "function f1(x) { return x.y; };";
@@ -618,9 +596,9 @@
"var result = f1(new X());"
"g1(new X());"
"f2(new X(), 'z');"
- "g2(new X(), 'z');"
- "gc(); gc();");
+ "g2(new X(), 'z');");
}
+ NonIncrementalGC();
CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
@@ -634,9 +612,7 @@
TEST(DeoptimizeLoadICStoreICNested) {
v8::HandleScope scope;
- const char* extension_list[] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(1, extension_list);
- LocalContext env(&extensions);
+ LocalContext env;
// Functions to generate load/store/keyed load/keyed store IC calls.
const char* f1_source = "function f1(x) { return x.y; };";
@@ -701,9 +677,9 @@
// Call functions and force deoptimization while processing the ics.
CompileRun("deopt = true;"
- "var result = f1(new X());"
- "gc(); gc();");
+ "var result = f1(new X());");
}
+ NonIncrementalGC();
CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
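All of these tests share one JavaScript pattern; the change above only swaps the in-script "gc(); gc()" (run via the v8/gc extension) for the C++ NonIncrementalGC() helper, so weak-handle callbacks still run. A condensed sketch of that pattern as a standalone script (assumption: run with --allow-natives-syntax; this is not one of the embedded test scripts verbatim):

  var count = 0;
  function g() { count++; %DeoptimizeFunction(f); }
  function f() { g(); }
  f();   // f deoptimizes lazily while g is still on the stack
  // The tests then assert count == 1 and that f is no longer optimized.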
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index f57477e..bf7e91b 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -147,6 +147,43 @@
}
+TEST(BoundFunctionInSnapshot) {
+ v8::HandleScope scope;
+ LocalContext env;
+ CompileRun(
+ "function myFunction(a, b) { this.a = a; this.b = b; }\n"
+ "function AAAAA() {}\n"
+ "boundFunction = myFunction.bind(new AAAAA(), 20, new Number(12)); \n");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("sizes"));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* f =
+ GetProperty(global, v8::HeapGraphEdge::kShortcut, "boundFunction");
+ CHECK(f);
+ CHECK_EQ(v8::String::New("native_bind"), f->GetName());
+ const v8::HeapGraphNode* bindings =
+ GetProperty(f, v8::HeapGraphEdge::kInternal, "bindings");
+ CHECK_NE(NULL, bindings);
+ CHECK_EQ(v8::HeapGraphNode::kArray, bindings->GetType());
+ CHECK_EQ(4, bindings->GetChildrenCount());
+
+ const v8::HeapGraphNode* bound_this = GetProperty(
+ f, v8::HeapGraphEdge::kShortcut, "bound_this");
+ CHECK(bound_this);
+ CHECK_EQ(v8::HeapGraphNode::kObject, bound_this->GetType());
+
+ const v8::HeapGraphNode* bound_function = GetProperty(
+ f, v8::HeapGraphEdge::kShortcut, "bound_function");
+ CHECK(bound_function);
+ CHECK_EQ(v8::HeapGraphNode::kClosure, bound_function->GetType());
+
+ const v8::HeapGraphNode* bound_argument = GetProperty(
+ f, v8::HeapGraphEdge::kShortcut, "bound_argument_1");
+ CHECK(bound_argument);
+ CHECK_EQ(v8::HeapGraphNode::kObject, bound_argument->GetType());
+}
+
+
TEST(HeapSnapshotEntryChildren) {
v8::HandleScope scope;
LocalContext env;
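For context on what BoundFunctionInSnapshot inspects, the bound function it creates behaves like this in plain JavaScript (sketch only; the "bindings", "bound_this" and "bound_argument_N" names are heap-profiler labels, not JS-visible properties):

  function myFunction(a, b) { this.a = a; this.b = b; }
  function AAAAA() {}
  var bf = myFunction.bind(new AAAAA(), 20, new Number(12));
  var o = new bf();   // 'new' ignores the bound this but keeps bound args
  // o.a === 20 and +o.b === 12; the snapshot exposes the AAAAA instance
  // as bound_this and the two bound values as bound arguments.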
diff --git a/test/mjsunit/array-store-and-grow.js b/test/mjsunit/array-store-and-grow.js
new file mode 100644
index 0000000..131d4eb
--- /dev/null
+++ b/test/mjsunit/array-store-and-grow.js
@@ -0,0 +1,183 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verifies that the KeyedStoreIC correctly handles out-of-bounds stores
+// to an array that grow it by a single element. Test functions are
+// called twice to make sure that the IC is used; the first call is
+// handled by the runtime in the miss stub.
+
+function array_store_1(a,b,c) {
+ return (a[b] = c);
+}
+
+// Check handling of the empty array.
+var a = [];
+array_store_1(a, 0, 1);
+a = [];
+array_store_1(a, 0, 1);
+assertEquals(1, a[0]);
+assertEquals(1, array_store_1([], 0, 1));
+
+a = [];
+for (x=0;x<100000;++x) {
+ assertEquals(x, array_store_1(a, x, x));
+}
+
+for (x=0;x<100000;++x) {
+ assertEquals(x, array_store_1([], 0, x));
+}
+
+function array_store_2(a,b,c) {
+ return (a[b] = c);
+}
+
+a = [];
+array_store_2(a, 0, 0.5);
+a = [];
+array_store_2(a, 0, 0.5);
+assertEquals(0.5, a[0]);
+assertEquals(0.5, array_store_2([], 0, 0.5));
+
+function array_store_3(a,b,c) {
+ return (a[b] = c);
+}
+
+x = new Object();
+a = [];
+array_store_3(a, 0, x);
+a = [];
+array_store_3(a, 0, x);
+assertEquals(x, a[0]);
+assertEquals(x, array_store_3([], 0, x));
+
+// Check the handling of COW arrays
+function makeCOW() {
+ return [1];
+}
+
+function array_store_4(a,b,c) {
+ return (a[b] = c);
+}
+
+a = makeCOW();
+array_store_4(a, 1, 1);
+a = makeCOW();
+array_store_4(a, 1, 1);
+assertEquals(1, a[1]);
+assertEquals(1, array_store_4([], 1, 1));
+
+function array_store_5(a,b,c) {
+ return (a[b] = c);
+}
+
+a = makeCOW();
+array_store_5(a, 1, 0.5);
+a = makeCOW();
+array_store_5(a, 1, 0.5);
+assertEquals(0.5, a[1]);
+assertEquals(0.5, array_store_5([], 1, 0.5));
+
+function array_store_6(a,b,c) {
+ return (a[b] = c);
+}
+
+a = makeCOW();
+array_store_6(a, 1, x);
+a = makeCOW();
+array_store_6(a, 1, x);
+assertEquals(x, a[1]);
+assertEquals(x, array_store_6([], 1, x));
+
+// Check the handling of mutable arrays.
+a = new Array(1,2,3);
+array_store_4(a, 3, 1);
+a = new Array(1,2,3);
+array_store_4(a, 3, 1);
+assertEquals(1, a[3]);
+assertEquals(1, array_store_4([], 3, 1));
+
+function array_store_5(a,b,c) {
+ return (a[b] = c);
+}
+
+a = new Array(1,2,3);
+array_store_5(a, 3, 0.5);
+a = new Array(1,2,3);
+array_store_5(a, 3, 0.5);
+assertEquals(0.5, a[3]);
+assertEquals(0.5, array_store_5([], 3, 0.5));
+
+function array_store_6(a,b,c) {
+ return (a[b] = c);
+}
+
+a = new Array(1,2,3);
+array_store_6(a, 3, x);
+a = new Array(1,2,3);
+array_store_6(a, 3, x);
+assertEquals(x, a[3]);
+assertEquals(x, array_store_6([], 3, x));
+
+function array_store_7(a,b,c) {
+ return (a[b] = c);
+}
+
+// Check the handling of mutable arrays of doubles
+var a = new Array(0.5, 1.5);
+array_store_7(a, 2, .5);
+a = new Array(0.5, 1.5);
+array_store_7(a, 2, .5);
+assertEquals(0.5, a[2]);
+a = new Array(0.5, 1.5);
+assertEquals(0.5, array_store_7(a, 2, 0.5));
+
+for (x=0;x<100000;++x) {
+ a = new Array(0.5, 1.5);
+ assertEquals(x, array_store_7(a, 2, x));
+}
+
+function array_store_8(a,b,c) {
+ return (a[b] = c);
+}
+
+var a = new Array(0.5, 1.5);
+array_store_8(a, 2, .5);
+a = new Array(0.5, 1.5);
+array_store_8(a, 10, .5);
+assertEquals(0.5, a[10]);
+
+// Grow the empty array with a double store.
+function array_store_9(a,b,c) {
+ return (a[b] = c);
+}
+
+var a = [];
+array_store_9(a, 0, 0.5);
+a = [];
+array_store_9(a, 0, 0.5);
+assertEquals(0.5, a[0]);
+assertEquals(0.5, array_store_9([], 0, 0.5));
diff --git a/test/mjsunit/compiler/inline-literals.js b/test/mjsunit/compiler/inline-literals.js
new file mode 100644
index 0000000..f78abe8
--- /dev/null
+++ b/test/mjsunit/compiler/inline-literals.js
@@ -0,0 +1,50 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test that we can inline functions containing materialized literals.
+
+function o2(b, c) {
+ return { 'b':b, 'c':c, 'y':b + c };
+}
+
+function o1(a, b, c) {
+ return { 'a':a, 'x':o2(b, c) };
+}
+
+function TestObjectLiteral(a, b, c) {
+ var expected = { 'a':a, 'x':{ 'b':b, 'c':c, 'y':b + c } };
+ var result = o1(a, b, c);
+ assertEquals(expected, result, "TestObjectLiteral");
+}
+
+TestObjectLiteral(1, 2, 3);
+TestObjectLiteral(1, 2, 3);
+%OptimizeFunctionOnNextCall(TestObjectLiteral);
+TestObjectLiteral(1, 2, 3);
+TestObjectLiteral('a', 'b', 'c');
diff --git a/test/mjsunit/elements-kind.js b/test/mjsunit/elements-kind.js
index c0bc333..4aa79de 100644
--- a/test/mjsunit/elements-kind.js
+++ b/test/mjsunit/elements-kind.js
@@ -147,6 +147,7 @@
// Crankshaft support for smi-only array elements.
function monomorphic(array) {
+ assertKind(elements_kind.fast_smi_only, array);
for (var i = 0; i < 3; i++) {
array[i] = i + 10;
}
@@ -157,6 +158,7 @@
}
}
var smi_only = new Array(1, 2, 3);
+assertKind(elements_kind.fast_smi_only, smi_only);
for (var i = 0; i < 3; i++) monomorphic(smi_only);
%OptimizeFunctionOnNextCall(monomorphic);
monomorphic(smi_only);
diff --git a/test/mjsunit/elements-transition-hoisting.js b/test/mjsunit/elements-transition-hoisting.js
index 53dc940..76f7b82 100644
--- a/test/mjsunit/elements-transition-hoisting.js
+++ b/test/mjsunit/elements-transition-hoisting.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
// Ensure that ElementsKind transitions in various situations are hoisted (or
// not hoisted) correctly, don't change the semantics programs and don't trigger
@@ -39,6 +39,11 @@
print("Tests do NOT include smi-only arrays.");
}
+// Force existing ICs from previous stress runs to be flushed; otherwise the
+// assumptions in this test about when deoptimizations get triggered are not
+// valid.
+gc();
+
if (support_smi_only_arrays) {
// Make sure that a simple elements array transitions inside a loop before
// stores to an array gets hoisted in a way that doesn't generate a deopt in
diff --git a/test/mjsunit/tools/tickprocessor.js b/test/mjsunit/tools/tickprocessor.js
index 30b0ec2..c48d9f3 100644
--- a/test/mjsunit/tools/tickprocessor.js
+++ b/test/mjsunit/tools/tickprocessor.js
@@ -376,8 +376,11 @@
}
assertTrue(pathLen != -1);
var testsPath = TEST_FILE_NAME.substr(0, pathLen + 1);
- var tp = new TickProcessor(
- new CppEntriesProviderMock(), separateIc, ignoreUnknown, stateFilter);
+ var tp = new TickProcessor(new CppEntriesProviderMock(),
+ separateIc,
+ TickProcessor.CALL_GRAPH_SIZE,
+ ignoreUnknown,
+ stateFilter);
var pm = new PrintMonitor(testsPath + refOutput);
tp.processLogFileInTest(testsPath + logInput);
tp.printStatistics();
diff --git a/tools/tickprocessor-driver.js b/tools/tickprocessor-driver.js
index 4201e43..9af5ab6 100644
--- a/tools/tickprocessor-driver.js
+++ b/tools/tickprocessor-driver.js
@@ -52,6 +52,7 @@
var tickProcessor = new TickProcessor(
new (entriesProviders[params.platform])(params.nm),
params.separateIc,
+ params.callGraphSize,
params.ignoreUnknown,
params.stateFilter,
snapshotLogProcessor);
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 5f57835..05a3369 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -146,7 +146,12 @@
function TickProcessor(
- cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) {
+ cppEntriesProvider,
+ separateIc,
+ callGraphSize,
+ ignoreUnknown,
+ stateFilter,
+ snapshotLogProcessor) {
LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -181,6 +186,7 @@
'end-code-region': null });
this.cppEntriesProvider_ = cppEntriesProvider;
+ this.callGraphSize_ = callGraphSize;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
this.snapshotLogProcessor_ = snapshotLogProcessor;
@@ -240,6 +246,7 @@
TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
+TickProcessor.CALL_GRAPH_SIZE = 5;
/**
* @override
@@ -535,7 +542,7 @@
padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' +
indentStr + rec.internalFuncName);
// Limit backtrace depth.
- if (indent < 10) {
+ if (indent < 2 * self.callGraphSize_) {
self.printHeavyProfile(rec.children, indent + 2);
}
// Delimit top-level functions.
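The indent < 2 * self.callGraphSize_ guard works because each recursion level indents by two spaces, so callGraphSize counts printed stack levels (the old hard-coded 10 corresponds to 10 / 2 = 5 levels). A tiny sketch of the arithmetic (standalone; assumes a starting indent of 0, as at the top-level call):

  var callGraphSize = 5;
  var depth = 0;
  for (var indent = 0; indent < 2 * callGraphSize; indent += 2) depth++;
  // depth === 5: the default prints the same five levels as before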
@@ -764,6 +771,8 @@
'Show only ticks from OTHER VM state'],
'-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
'Show only ticks from EXTERNAL VM state'],
+ '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
+ 'Set the call graph size'],
'--ignore-unknown': ['ignoreUnknown', true,
'Exclude ticks of unknown code entries from processing'],
'--separate-ic': ['separateIc', true,
@@ -792,6 +801,7 @@
snapshotLogFileName: null,
platform: 'unix',
stateFilter: null,
+ callGraphSize: 5,
ignoreUnknown: false,
separateIc: false,
nm: 'nm'
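Putting the plumbing together, a driver now constructs the processor with the new argument in third position. A sketch using names from this file (assumptions: UnixCppEntriesProvider is the existing Unix provider in tickprocessor.js, and omitting the trailing snapshotLogProcessor argument is acceptable when no snapshot log is processed):

  var params = ArgumentsProcessor.DEFAULTS;
  var tp = new TickProcessor(
      new UnixCppEntriesProvider(params.nm),
      params.separateIc,
      params.callGraphSize,   // new argument added by this change
      params.ignoreUnknown,
      params.stateFilter);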