Version 3.12.16
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@12191 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 047929a..33d4880 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2012-07-25: Version 3.12.16
+
+ Performance and stability improvements on all platforms.
+
+
2012-07-24: Version 3.12.15

Added PRESERVE_ASCII_NULL option to String::WriteAscii.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index f55956c..4dcc21d 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -338,10 +338,6 @@
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -355,7 +351,7 @@
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
@@ -407,7 +403,7 @@
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
diff --git a/src/factory.cc b/src/factory.cc
index 25989ca..df72da5 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -892,64 +892,6 @@
}
-void Factory::CopyAppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors) {
- Handle<DescriptorArray> array(map->instance_descriptors());
- v8::NeanderArray callbacks(descriptors);
- int nof_callbacks = callbacks.length();
- int descriptor_count = array->number_of_descriptors();
- Handle<DescriptorArray> result =
- NewDescriptorArray(descriptor_count + nof_callbacks);
-
- // Ensure that marking will not progress and change color of objects.
- DescriptorArray::WhitenessWitness witness(*result);
-
- // Copy the descriptors from the array.
- if (0 < descriptor_count) {
- for (int i = 0; i < descriptor_count; i++) {
- result->CopyFrom(i, *array, i, witness);
- }
- }
-
- map->set_instance_descriptors(*result);
-
- // Fill in new callback descriptors. Process the callbacks from
- // back to front so that the last callback with a given name takes
- // precedence over previously added callbacks with that name.
- for (int i = nof_callbacks - 1; i >= 0; i--) {
- Handle<AccessorInfo> entry =
- Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
- // Ensure the key is a symbol before writing into the instance descriptor.
- Handle<String> key =
- SymbolFromString(Handle<String>(String::cast(entry->name())));
- // Check if a descriptor with this name already exists before writing.
- if (LinearSearch(*result, *key, map->NumberOfSetDescriptors()) ==
- DescriptorArray::kNotFound) {
- CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
- map->AppendDescriptor(&desc, witness);
- }
- }
-
- int new_number_of_descriptors = map->NumberOfSetDescriptors();
- // Reinstall the original descriptor array if no new elements were added.
- if (new_number_of_descriptors == descriptor_count) {
- map->set_instance_descriptors(*array);
- return;
- }
-
- // If duplicates were detected, allocate a result of the right size
- // and transfer the elements.
- if (new_number_of_descriptors < result->length()) {
- Handle<DescriptorArray> new_result =
- NewDescriptorArray(new_number_of_descriptors);
- for (int i = 0; i < new_number_of_descriptors; i++) {
- new_result->CopyFrom(i, *result, i, witness);
- }
- map->set_instance_descriptors(*new_result);
- }
-}
-
-
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
@@ -1337,7 +1279,7 @@
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
- CopyAppendCallbackDescriptors(map, props);
+ Map::CopyAppendCallbackDescriptors(map, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
diff --git a/src/factory.h b/src/factory.h
index 6b22140..e70f3b1 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -496,9 +496,6 @@
Handle<String> name,
LanguageMode language_mode);
- void CopyAppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors);
-
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index a07df91..eca36b4 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -113,6 +113,21 @@
Zone* zone() const { return zone_; }
+ static const int kMaxBackEdgeWeight = 127;
+
+#if V8_TARGET_ARCH_IA32
+ static const int kBackEdgeDistanceUnit = 100;
+#elif V8_TARGET_ARCH_X64
+ static const int kBackEdgeDistanceUnit = 162;
+#elif V8_TARGET_ARCH_ARM
+ static const int kBackEdgeDistanceUnit = 142;
+#elif V8_TARGET_ARCH_MIPS
+ static const int kBackEdgeDistanceUnit = 142;
+#else
+#error Unsupported target architecture.
+#endif
+
+
private:
class Breakable;
class Iteration;
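Note: the per-architecture kBackEdgeDistanceDivisor constants that each
full-codegen-<arch>.cc previously defined locally are consolidated here as
FullCodeGenerator::kBackEdgeDistanceUnit, which also lets the runtime
profiler (see the runtime-profiler.cc hunk below) derive its threshold from
the same value. As a sketch, every EmitStackCheck hunk in this patch
computes:

  // Back-edge weight: proportional to the loop body's code size, capped.
  // "distance" is the number of code bytes generated since the back-edge
  // target (or since function entry, for the return-sequence variant).
  int weight = Min(kMaxBackEdgeWeight,                   // 127
                   Max(1, distance / kBackEdgeDistanceUnit));
  // Illustrative figure only: on ia32 (unit == 100), a 2500-byte loop body
  // gives weight 25, draining the profiling counter 25x faster than a tiny
  // loop would.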
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index cdf965e..75253c0 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -325,10 +325,6 @@
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 100;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -340,7 +336,7 @@
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -402,7 +398,7 @@
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 5a2074e..93ba3a0 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -713,6 +713,43 @@
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+ GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ __ push(a1); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore receiver.
+ __ pop(a1);
+
+ // Tear down internal frame.
+ }
+
+ GenerateTailCallToSharedCode(masm);
+}
+
+
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
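Note: GenerateTailCallToSharedCode resumes execution in the function's
unoptimized code. A rough C++-level reading of the three-instruction MIPS
sequence above (a sketch of intent, not an actual runtime path):

  // SharedFunctionInfo* shared = function->shared();  // lw a2, [a1 + off]
  // Code* code = shared->code();                      // lw a2, [a2 + off]
  // goto code->instruction_start();                   // Addu at, ...; Jump at
  // (Code::kHeaderSize - kHeapObjectTag turns the tagged Code pointer into
  // the address of its first instruction.)

Generate_ParallelRecompile uses it so that, after Runtime::kParallelRecompile
queues the function for background compilation, execution continues in the
existing unoptimized code instead of blocking on the compile.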
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 0eea32c..63e3b44 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -7766,6 +7766,66 @@
}
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+ if (entry_hook_ != NULL) {
+ ProfileEntryHookStub stub;
+ __ push(ra);
+ __ CallStub(&stub);
+ __ pop(ra);
+ }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+ // The entry hook is a "push ra" instruction, followed by a call.
+ // Note: on MIPS, "push" expands to two instructions.
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+ // Save live volatile registers.
+ __ Push(ra, t1, a1);
+ const int32_t kNumSavedRegs = 3;
+
+ // Compute the function's address for the first argument.
+ __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+
+ // The caller's return address is above the saved temporaries.
+ // Grab that for the second argument to the hook.
+ __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+ __ mov(t1, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
+ __ And(sp, sp, Operand(-frame_alignment));
+ }
+
+#if defined(V8_HOST_ARCH_MIPS)
+ __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+ __ lw(at, MemOperand(at));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+ __ li(at, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+#endif
+ __ Call(at);
+
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+ __ mov(sp, t1);
+ }
+
+ __ Pop(ra, t1, a1);
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
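Note: this brings MIPS to parity with the other ports' profiler entry hook
support; MaybeCallEntryHook is now emitted at the top of both code generator
prologues (see the full-codegen-mips.cc and lithium-codegen-mips.cc hunks
below). A minimal registration sketch, assuming the v8.h FunctionEntryHook
API of this era:

  // The hook fires on entry to each generated function. "function" is the
  // code object's start address; "return_addr_location" points at the stack
  // slot holding the caller's return address (useful for stack walking).
  static void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
    RecordCodeEntry(function);  // hypothetical profiler helper
  }

  // Registered once, before V8 generates any code:
  //   v8::V8::SetFunctionEntryHook(MyEntryHook);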
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 6f4869e..4a19d6c 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -143,6 +143,8 @@
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -340,10 +342,6 @@
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
@@ -360,7 +358,7 @@
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
@@ -413,7 +411,7 @@
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index e924159..cb39b70 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -125,6 +125,8 @@
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -2528,8 +2530,13 @@
Register scratch = scratch0();
// Load the result.
- __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
- __ addu(scratch, elements, scratch);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ lw(result, FieldMemOperand(scratch, offset));
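Note: the new IsTagged() branch handles keys that are still smis. A smi
carries its payload shifted left by kSmiTagSize (1), so scaling it by
kPointerSizeLog2 - kSmiTagSize reaches the same byte offset as scaling the
untagged index by kPointerSizeLog2. Worked example on MIPS32 (kPointerSize
== 4, kPointerSizeLog2 == 2):

  index 3 as a smi:  key == 3 << 1 == 6
  tagged path:       6 << (2 - 1) == 12 == 3 * kPointerSize
  untagged path:     3 << 2       == 12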
@@ -2555,8 +2562,9 @@
DoubleRegister result = ToDoubleRegister(instr->result());
Register scratch = scratch0();
- int shift_size =
- ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -2569,14 +2577,15 @@
if (key_is_constant) {
__ Addu(elements, elements,
- Operand(((constant_key + instr->additional_index()) << shift_size) +
+ Operand(((constant_key + instr->additional_index()) <<
+ element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, Operand(scratch));
__ Addu(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << shift_size)));
+ (instr->additional_index() << element_size_shift)));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2588,6 +2597,50 @@
}
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset) {
+ if (additional_index != 0 && !key_is_constant) {
+ additional_index *= 1 << (element_size - shift_size);
+ __ Addu(scratch0(), key, Operand(additional_index));
+ }
+
+ if (key_is_constant) {
+ return MemOperand(base,
+ (constant_key << element_size) + additional_offset);
+ }
+
+ if (additional_index == 0) {
+ if (shift_size >= 0) {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), key, 1);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+ }
+
+ if (shift_size >= 0) {
+ __ sll(scratch0(), scratch0(), shift_size);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ } else {
+ ASSERT_EQ(-1, shift_size);
+ __ srl(scratch0(), scratch0(), 1);
+ __ Addu(scratch0(), base, scratch0());
+ return MemOperand(scratch0());
+ }
+}
+
+
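Note: PrepareKeyedOperand factors out the operand computation that the
specialized-array load and store paths below previously open-coded. The
address it materializes is, conceptually:

  base + ((untagged_key + additional_index) << element_size) + additional_offset

A shift_size of -1 arises when a tagged (smi) key indexes byte-sized
elements: element_size is 0 but the smi tag still has to come off, hence the
srl by 1. On the non-constant path with additional_index != 0, the
pre-scaling at the top of the function (additional_index *= 1 <<
(element_size - shift_size)) expresses the index in the key's scale, so the
single shift afterwards untags and scales both terms at once.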
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
@@ -2603,14 +2656,16 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@@ -2624,24 +2679,10 @@
}
} else {
Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ Addu(scratch, key, instr->additional_index());
- }
- MemOperand mem_operand(zero_reg);
- if (key_is_constant) {
- mem_operand =
- MemOperand(external_pointer,
- (constant_key << shift_size) + additional_offset);
- } else {
- if (instr->additional_index() == 0) {
- __ sll(scratch, key, shift_size);
- } else {
- __ sll(scratch, scratch, shift_size);
- }
- __ Addu(scratch, scratch, external_pointer);
- mem_operand = MemOperand(scratch);
- }
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ lb(result, mem_operand);
@@ -3572,8 +3613,13 @@
+ FixedArray::kHeaderSize;
__ sw(value, FieldMemOperand(elements, offset));
} else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
if (instr->additional_index() != 0) {
__ Addu(scratch,
scratch,
@@ -3619,9 +3665,11 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << shift_size) +
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
@@ -3642,7 +3690,8 @@
}
__ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
}
@@ -3662,14 +3711,17 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ElementsKindToShiftSize(elements_kind);
- int additional_offset = instr->additional_index() << shift_size;
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+ __ Addu(scratch0(), external_pointer, constant_key <<
+ element_size_shift);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@@ -3683,24 +3735,10 @@
}
} else {
Register value(ToRegister(instr->value()));
- Register scratch = scratch0();
- if (instr->additional_index() != 0 && !key_is_constant) {
- __ Addu(scratch, key, instr->additional_index());
- }
- MemOperand mem_operand(zero_reg);
- if (key_is_constant) {
- mem_operand = MemOperand(external_pointer,
- ((constant_key + instr->additional_index())
- << shift_size));
- } else {
- if (instr->additional_index() == 0) {
- __ sll(scratch, key, shift_size);
- } else {
- __ sll(scratch, scratch, shift_size);
- }
- __ Addu(scratch, scratch, external_pointer);
- mem_operand = MemOperand(scratch);
- }
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index a2125d7..570fb07 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -128,6 +128,15 @@
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
+ MemOperand PrepareKeyedOperand(Register key,
+ Register base,
+ bool key_is_constant,
+ int constant_key,
+ int element_size,
+ int shift_size,
+ int additional_index,
+ int additional_offset);
+
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index da5beb6..f2fbb27 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1773,7 +1773,8 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
@@ -1785,7 +1786,8 @@
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@@ -1805,7 +1807,8 @@
(representation.IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
@@ -1833,7 +1836,8 @@
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
@@ -1850,7 +1854,8 @@
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@@ -1872,7 +1877,8 @@
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 86b8f33..7f8bdfd 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -843,6 +843,7 @@
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+ DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
diff --git a/src/objects.cc b/src/objects.cc
index 6511843..67ff486 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2106,6 +2106,126 @@
}
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+ // If we are doing a big trim in old space then we zap the space.
+ Object** zap = reinterpret_cast<Object**>(new_end);
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
+ }
+}
+
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+ ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
+ // In large object space the object's start must coincide with chunk
+ // and thus the trick is just not applicable.
+ ASSERT(!HEAP->lo_space()->Contains(elms));
+
+ const int len = elms->length();
+
+ ASSERT(to_trim < len);
+
+ Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+ if (trim_mode == FROM_GC) {
+#ifdef DEBUG
+ ZapEndOfFixedArray(new_end, to_trim);
+#endif
+ } else {
+ ZapEndOfFixedArray(new_end, to_trim);
+ }
+
+ int size_delta = to_trim * kPointerSize;
+
+ // Technically in new space this write might be omitted (except in debug
+ // mode, which iterates through the heap), but to be safe we still do it.
+ heap->CreateFillerObjectAt(new_end, size_delta);
+
+ elms->set_length(len - to_trim);
+
+ // Maintain marking consistency for IncrementalMarking.
+ if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+ if (trim_mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+ }
+ }
+}
+
+
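Note: both instantiations share the trimming logic; the template parameter
only selects the live-byte counter to update and makes zapping debug-only
for GC-time trims. Worked example, assuming a 32-bit heap (kPointerSize ==
4): trimming a FixedArray of length 10 with to_trim == 4 puts new_end at
elms->address() + FixedArray::SizeFor(6), writes a 16-byte filler object
(size_delta == 4 * kPointerSize) at that address, sets the length field to
6, and subtracts 16 from the page's live-byte count if elms is marked black.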
+void Map::CopyAppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors) {
+ Isolate* isolate = map->GetIsolate();
+ Handle<DescriptorArray> array(map->instance_descriptors());
+ v8::NeanderArray callbacks(descriptors);
+ int nof_callbacks = callbacks.length();
+ int descriptor_count = array->number_of_descriptors();
+
+ // Ensure the keys are symbols before writing them into the instance
+ // descriptor. Since it may cause a GC, it has to be done before we
+ // temporarily put the heap in an invalid state while appending descriptors.
+ for (int i = 0; i < nof_callbacks; ++i) {
+ Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
+ Handle<String> key =
+ isolate->factory()->SymbolFromString(
+ Handle<String>(String::cast(entry->name())));
+ entry->set_name(*key);
+ }
+
+ Handle<DescriptorArray> result =
+ isolate->factory()->NewDescriptorArray(descriptor_count + nof_callbacks);
+
+ // Ensure that marking will not progress and change color of objects.
+ DescriptorArray::WhitenessWitness witness(*result);
+
+ // Copy the descriptors from the array.
+ if (0 < descriptor_count) {
+ for (int i = 0; i < descriptor_count; i++) {
+ result->CopyFrom(i, *array, i, witness);
+ }
+ }
+
+ // After this point the GC is not allowed to run anymore until the map is in a
+ // consistent state again, i.e., all the descriptors are appended and the
+ // descriptor array is trimmed to the right size.
+ map->set_instance_descriptors(*result);
+
+ // Fill in new callback descriptors. Process the callbacks from
+ // back to front so that the last callback with a given name takes
+ // precedence over previously added callbacks with that name.
+ for (int i = nof_callbacks - 1; i >= 0; i--) {
+ AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i));
+ String* key = String::cast(entry->name());
+ // Check if a descriptor with this name already exists before writing.
+ if (LinearSearch(*result, key, map->NumberOfSetDescriptors()) ==
+ DescriptorArray::kNotFound) {
+ CallbacksDescriptor desc(key, entry, entry->property_attributes());
+ map->AppendDescriptor(&desc, witness);
+ }
+ }
+
+ int new_number_of_descriptors = map->NumberOfSetDescriptors();
+ // Reinstall the original descriptor array if no new elements were added.
+ if (new_number_of_descriptors == descriptor_count) {
+ map->set_instance_descriptors(*array);
+ return;
+ }
+
+ // If duplicates were detected, trim the descriptor array to the right size.
+ int new_array_size = DescriptorArray::SizeFor(new_number_of_descriptors);
+ if (new_array_size < result->length()) {
+ RightTrimFixedArray<FROM_MUTATOR>(
+ isolate->heap(), *result, result->length() - new_array_size);
+ }
+}
+
+
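Note: because the loop walks the callbacks back to front and LinearSearch
skips names already present, the last callback registered under a given name
wins. Hypothetical example with descriptor_count == 0 and callbacks ==
[A:"x", B:"y", C:"x"]: iteration order is C, B, A; C and B are appended, A
is dropped as a duplicate of "x", and the array allocated for three
descriptors is then right-trimmed by result->length() -
DescriptorArray::SizeFor(2).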
static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
ASSERT(!map.is_null());
for (int i = 0; i < maps->length(); ++i) {
@@ -5723,10 +5843,9 @@
return heap->empty_descriptor_array();
}
// Allocate the array of keys.
- { MaybeObject* maybe_array =
- heap->AllocateFixedArray(ToKeyIndex(number_of_descriptors));
- if (!maybe_array->To(&result)) return maybe_array;
- }
+ MaybeObject* maybe_array =
+ heap->AllocateFixedArray(SizeFor(number_of_descriptors));
+ if (!maybe_array->To(&result)) return maybe_array;
result->set(kEnumCacheIndex, Smi::FromInt(0));
result->set(kTransitionsIndex, Smi::FromInt(0));
@@ -7083,46 +7202,6 @@
}
-// This function should only be called from within the GC, since it uses
-// IncrementLiveBytesFromGC. If called from anywhere else, this results in an
-// inconsistent live-bytes count.
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- const int len = elms->length();
-
- ASSERT(to_trim < len);
-
- Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
-#ifdef DEBUG
- // If we are doing a big trim in old space then we zap the space.
- Object** zap = reinterpret_cast<Object**>(new_end);
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
-#endif
-
- int size_delta = to_trim * kPointerSize;
-
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(new_end, size_delta);
-
- elms->set_length(len - to_trim);
-
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- }
-}
-
-
// Clear a possible back pointer in case the transition leads to a dead map.
// Return true in case a back pointer has been cleared and false otherwise.
static bool ClearBackPointer(Heap* heap, Object* target) {
@@ -7185,7 +7264,8 @@
int trim = t->number_of_transitions() - transition_index;
if (trim > 0) {
- RightTrimFixedArray(heap, t, trim * TransitionArray::kTransitionSize);
+ RightTrimFixedArray<FROM_GC>(
+ heap, t, trim * TransitionArray::kTransitionSize);
}
}
diff --git a/src/objects.h b/src/objects.h
index 92c2bc5..3694955 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2635,6 +2635,10 @@
// fit in a page).
static const int kMaxNumberOfDescriptors = 1024 + 512;
+ static int SizeFor(int number_of_descriptors) {
+ return ToKeyIndex(number_of_descriptors);
+ }
+
private:
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
@@ -4961,6 +4965,11 @@
Handle<Code> code);
MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
+ // Extend the descriptor array of the map with the list of descriptors.
+ // In case of duplicates, the latest descriptor is used.
+ static void CopyAppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors);
+
// Returns the found code or undefined if absent.
Object* FindInCodeCache(String* name, Code::Flags flags);
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 1856359..e0e9812 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -34,6 +34,7 @@
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
+#include "full-codegen.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "mark-compact.h"
@@ -81,7 +82,8 @@
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
-static const int kMaxSizeEarlyOpt = 500;
+static const int kMaxSizeEarlyOpt =
+ 5 * FullCodeGenerator::kBackEdgeDistanceUnit;
Atomic32 RuntimeProfiler::state_ = 0;
@@ -317,8 +319,6 @@
}
if (!function->IsOptimizable()) continue;
-
-
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
@@ -341,7 +341,7 @@
}
}
} else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
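Note: deriving kMaxSizeEarlyOpt from the back-edge unit keeps the "small
function" heuristic proportional to each architecture's code density: on
ia32 (unit 100) the new limit is 5 * 100 == 500 bytes, identical to the old
hard-coded constant, while x64 (unit 162) and ARM/MIPS (unit 142) now allow
810 and 710 bytes respectively.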
diff --git a/src/version.cc b/src/version.cc
index 03c020b..3b429e4 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 12
-#define BUILD_NUMBER 15
+#define BUILD_NUMBER 16
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 3eb6493..344905e 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -321,10 +321,6 @@
}
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 162;
-
-
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@@ -336,7 +332,7 @@
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
@@ -392,7 +388,7 @@
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceDivisor));
+ Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;