Version 2.2.22
Added ES5 Object.isExtensible and Object.preventExtensions.
Enabled building V8 as a DLL.
Fixed a bug in date code where -0 was not interpreted as 0 (issue 736).
Performance improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@5017 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.h b/src/api.h
index e7b1394..5c67136 100644
--- a/src/api.h
+++ b/src/api.h
@@ -134,16 +134,6 @@
};
-v8::Arguments::Arguments(v8::Local<v8::Value> data,
- v8::Local<v8::Object> holder,
- v8::Local<v8::Function> callee,
- bool is_construct_call,
- void** values, int length)
- : data_(data), holder_(holder), callee_(callee),
- is_construct_call_(is_construct_call),
- values_(values), length_(length) { }
-
-
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
};
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index c8170b3..f5ff43a 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1802,6 +1802,16 @@
void Assembler::vmov(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Dd = Dm
+ // Instruction details available in ARM DDI 0406B, A8-642.
+ emit(cond | 0xE*B24 | 0xB*B20 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond) {
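
For reference, a worked example of the encoding emitted by the new register-to-register vmov above. The B* constants and the al condition value mirror V8's assembler definitions (Bn == 1 << n, al == 14 << 28); treat those, and the helper name, as assumptions of this sketch rather than V8 API.

#include <cstdint>

constexpr uint32_t B6 = 1u << 6, B8 = 1u << 8, B9 = 1u << 9,
                   B12 = 1u << 12, B20 = 1u << 20, B24 = 1u << 24;
constexpr uint32_t al = 14u << 28;  // "always" condition, already shifted into bits 31-28

// Same bit arithmetic as the emit() call in the hunk above.
constexpr uint32_t EncodeVmovF64(uint32_t dst_code, uint32_t src_code,
                                 uint32_t cond = al) {
  return cond | 0xE * B24 | 0xB * B20 | dst_code * B12 |
         0x5 * B9 | B8 | B6 | src_code;
}

// vmov.f64 d0, d1 encodes to 0xEEB00B41 (ARM DDI 0406B, A8-642).
static_assert(EncodeVmovF64(0, 1) == 0xEEB00B41u, "VMOV Dd, Dm encoding");
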
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 8a4173d..6a4fb23 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -930,6 +930,10 @@
const Register base,
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
+
+ void vmov(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index f923c09..4d18727 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -748,37 +748,43 @@
JumpTarget* false_target) {
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
+ bool known_smi = frame_->KnownSmiAt(0);
Register tos = frame_->PopToRegister();
// Fast case checks
// Check if the value is 'false'.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
+ if (!known_smi) {
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(tos, ip);
+ false_target->Branch(eq);
- // Check if the value is 'true'.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(tos, ip);
- true_target->Branch(eq);
+ // Check if the value is 'true'.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(tos, ip);
+ true_target->Branch(eq);
- // Check if the value is 'undefined'.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(tos, ip);
- false_target->Branch(eq);
+ // Check if the value is 'undefined'.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(tos, ip);
+ false_target->Branch(eq);
+ }
// Check if the value is a smi.
__ cmp(tos, Operand(Smi::FromInt(0)));
- false_target->Branch(eq);
- __ tst(tos, Operand(kSmiTagMask));
- true_target->Branch(eq);
- // Slow case: call the runtime.
- frame_->EmitPush(tos);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
+ if (!known_smi) {
+ false_target->Branch(eq);
+ __ tst(tos, Operand(kSmiTagMask));
+ true_target->Branch(eq);
+
+ // Slow case: call the runtime.
+ frame_->EmitPush(tos);
+ frame_->CallRuntime(Runtime::kToBool, 1);
+ // Convert the result (r0) to a condition code.
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ }
cc_reg_ = ne;
}
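
The hunk above short-circuits ToBoolean when the virtual frame has already proved the top value is a smi. A minimal C++ sketch of that reduction (illustrative smi tagging, not V8's types):

#include <cassert>
#include <cstdint>

constexpr uint32_t kSmiTagMask = 1;                        // low bit clear == smi
inline bool IsSmi(uint32_t v) { return (v & kSmiTagMask) == 0; }
constexpr uint32_t SmiFromInt(int32_t n) { return static_cast<uint32_t>(n) << 1; }

// With known_smi the false/true/undefined root checks, the smi-tag test and
// the Runtime::kToBool fallback can all be skipped; one comparison remains.
inline bool ToBooleanKnownSmi(uint32_t tagged_smi) {
  assert(IsSmi(tagged_smi));
  return tagged_smi != SmiFromInt(0);                      // cmp(tos, Smi::FromInt(0)); cc = ne
}
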
@@ -1745,11 +1751,15 @@
val = node->fun(); // NULL if we don't have a function
}
+
if (val != NULL) {
+ WriteBarrierCharacter wb_info =
+ val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
+ if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
// Set initial value.
Reference target(this, node->proxy());
Load(val);
- target.SetValue(NOT_CONST_INIT);
+ target.SetValue(NOT_CONST_INIT, wb_info);
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -2485,13 +2495,13 @@
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
- each.SetValue(NOT_CONST_INIT);
+ each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
frame_->Drop(2);
} else {
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, r3 pushed above) is
// right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
+ each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
frame_->Drop();
}
}
@@ -3646,6 +3656,8 @@
// Evaluate the receiver subexpression.
Load(prop->obj());
+ WriteBarrierCharacter wb_info;
+
// Change to slow case in the beginning of an initialization block to
// avoid the quadratic behavior of repeatedly adding fast properties.
if (node->starts_initialization_block()) {
@@ -3667,7 +3679,7 @@
// [tos] : key
// [tos+1] : receiver
// [tos+2] : receiver if at the end of an initialization block
-
+ //
// Evaluate the right-hand side.
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
@@ -3699,9 +3711,13 @@
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
inline_smi);
}
+ wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
} else {
// For non-compound assignment just load the right-hand side.
Load(node->value());
+ wb_info = node->value()->AsLiteral() != NULL ?
+ NEVER_NEWSPACE :
+ (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
}
// Stack layout:
@@ -3713,7 +3729,7 @@
// Perform the assignment. It is safe to ignore constants here.
ASSERT(node->op() != Token::INIT_CONST);
CodeForSourcePosition(node->position());
- EmitKeyedStore(prop->key()->type());
+ EmitKeyedStore(prop->key()->type(), wb_info);
frame_->EmitPush(r0);
// Stack layout:
@@ -4291,7 +4307,7 @@
} else {
CpuFeatures::Scope scope(VFP3);
JumpTarget runtime, done;
- Label not_minus_half, allocate_return;
+ Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
@@ -4299,18 +4315,74 @@
// Get base and exponent to registers.
Register exponent = frame_->PopToRegister();
Register base = frame_->PopToRegister(exponent);
+ Register heap_number_map = no_reg;
// Set the frame for the runtime jump target. The code below jumps to the
// jump target label so the frame needs to be established before that.
ASSERT(runtime.entry_frame() == NULL);
runtime.set_entry_frame(frame_);
- __ BranchOnSmi(exponent, runtime.entry_label());
+ __ BranchOnNotSmi(exponent, &exponent_nonsmi);
+ __ BranchOnNotSmi(base, &base_nonsmi);
+ heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Exponent is a smi and base is a smi. Get the smi value into vfp register
+ // d1.
+ __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
+ __ b(&powi);
+
+ __ bind(&base_nonsmi);
+ // Exponent is a smi and base is a non-smi. Get the double value from the base
+ // into vfp register d1.
+ __ ObjectToDoubleVFPRegister(base, d1,
+ scratch1, scratch2, heap_number_map, s0,
+ runtime.entry_label());
+
+ __ bind(&powi);
+
+ // Load 1.0 into d0.
+ __ mov(scratch2, Operand(0x3ff00000));
+ __ mov(scratch1, Operand(0));
+ __ vmov(d0, scratch1, scratch2);
+
+ // Get the absolute untagged value of the exponent and use that for the
+ // calculation.
+ __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
+ __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi); // Negate if negative.
+ __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
+
+ // Run through all the bits in the exponent. The result is calculated in d0
+ // and d1 holds base^(2^bit).
+ Label more_bits;
+ __ bind(&more_bits);
+ __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
+ __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
+ __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
+ __ b(ne, &more_bits);
+
+ // If exponent is positive we are done.
+ __ cmp(exponent, Operand(0));
+ __ b(ge, &allocate_return);
+
+ // If the exponent is negative the result is 1/result (d2 already holds 1.0 in that
+ // case). However if d0 has reached infinity this will not provide the
+ // correct result, so call runtime if that is the case.
+ __ mov(scratch2, Operand(0x7FF00000));
+ __ mov(scratch1, Operand(0));
+ __ vmov(d1, scratch1, scratch2); // Load infinity into d1.
+ __ vcmp(d0, d1);
+ __ vmrs(pc);
+ runtime.Branch(eq); // d0 reached infinity.
+ __ vdiv(d0, d2, d0);
+ __ b(&allocate_return);
+
+ __ bind(&exponent_nonsmi);
// Special handling of raising to the power of -0.5 and 0.5. First check
// that the value is a heap number and that the lower bits (which for both
// values are zero).
- Register heap_number_map = r6;
+ heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
__ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
@@ -4319,7 +4391,7 @@
__ tst(scratch2, scratch2);
runtime.Branch(ne);
- // Load the e
+ // Load the higher bits (which contain the floating point exponent).
__ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
// Compare exponent with -0.5.
@@ -4356,8 +4428,10 @@
__ vsqrt(d0, d0);
__ bind(&allocate_return);
- __ AllocateHeapNumberWithValue(
- base, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
+ Register scratch3 = r5;
+ __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
+ heap_number_map, runtime.entry_label());
+ __ mov(base, scratch3);
done.Jump();
runtime.Bind();
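
The new fast path above is exponentiation by squaring over the bits of the absolute smi exponent, with the reciprocal taken at the end for negative exponents and a bail-out when the intermediate result overflows to infinity. A plain C++ sketch of the same algorithm (not the V8 implementation; std::pow stands in for the runtime fallback):

#include <cmath>
#include <cstdint>

double PowSmiExponent(double base, int32_t exponent) {
  double result = 1.0;                                    // d0
  double power = base;                                    // d1: base^(2^i) for bit i
  uint32_t bits = exponent < 0 ? 0u - static_cast<uint32_t>(exponent)
                               : static_cast<uint32_t>(exponent);
  while (bits != 0) {
    if (bits & 1) result *= power;                        // vmul(d0, d0, d1, cs)
    bits >>= 1;
    if (bits != 0) power *= power;                        // vmul(d1, d1, d1, ne)
  }
  if (exponent >= 0) return result;
  if (std::isinf(result)) return std::pow(base, exponent);  // runtime call in the stub
  return 1.0 / result;                                    // vdiv(d0, d2, d0)
}
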
@@ -5451,7 +5525,7 @@
__ sub(value, value, Operand(Smi::FromInt(1)));
}
frame_->EmitPush(value);
- target.SetValue(NOT_CONST_INIT);
+ target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
if (is_postfix) frame_->Pop();
ASSERT_EQ(original_height + 1, frame_->height());
return;
@@ -5550,7 +5624,7 @@
// Set the target with the result, leaving the result on
// top of the stack. Removes the target from the stack if
// it has a non-zero size.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
+ if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
}
// Postfix: Discard the new value and use the old.
@@ -6283,7 +6357,8 @@
}
-void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+void CodeGenerator::EmitKeyedStore(StaticType* key_type,
+ WriteBarrierCharacter wb_info) {
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
@@ -6299,25 +6374,45 @@
__ IncrementCounter(&Counters::keyed_store_inline, 1,
scratch1, scratch2);
+
+
// Load the value, key and receiver from the stack.
+ bool value_is_harmless = frame_->KnownSmiAt(0);
+ if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
+ bool key_is_smi = frame_->KnownSmiAt(1);
Register value = frame_->PopToRegister();
Register key = frame_->PopToRegister(value);
VirtualFrame::SpilledScope spilled(frame_);
Register receiver = r2;
frame_->EmitPop(receiver);
+#ifdef DEBUG
+ bool we_remembered_the_write_barrier = value_is_harmless;
+#endif
+
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(value, key, receiver);
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
- __ tst(value, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!value_is_harmless) {
+ // If the value is not likely to be a Smi then let's test the fixed array
+ // for new space instead. See below.
+ if (wb_info == LIKELY_SMI) {
+ __ tst(value, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+#ifdef DEBUG
+ we_remembered_the_write_barrier = true;
+#endif
+ }
+ }
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!key_is_smi) {
+ // Check that the key is a smi.
+ __ tst(key, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@@ -6333,24 +6428,35 @@
__ cmp(scratch1, key);
deferred->Branch(ls); // Unsigned less equal.
+ // Get the elements array from the receiver.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (!value_is_harmless && wb_info != LIKELY_SMI) {
+ Label ok;
+ __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
+ __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
+ __ tst(value, Operand(kSmiTagMask), ne);
+ deferred->Branch(ne);
+#ifdef DEBUG
+ we_remembered_the_write_barrier = true;
+#endif
+ }
+ // Check that the elements array is not a dictionary.
+ __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
// The following instructions are the part of the inlined store keyed
// property code which can be patched. Therefore the exact number of
// instructions generated need to be fixed, so the constant pool is blocked
// while generating this code.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+#ifdef DEBUG
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
+#endif
+
// Read the fixed array map from the constant pool (not from the root
// array) so that the value can be patched. When debugging, we patch this
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
__ mov(scratch3, Operand(Factory::fixed_array_map()));
__ cmp(scratch2, scratch3);
deferred->Branch(ne);
@@ -6367,6 +6473,8 @@
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
+ ASSERT(we_remembered_the_write_barrier);
+
deferred->BindExit();
} else {
frame()->CallKeyedStoreIC();
@@ -6464,7 +6572,7 @@
}
-void Reference::SetValue(InitState init_state) {
+void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
MacroAssembler* masm = cgen_->masm();
@@ -6496,7 +6604,7 @@
Property* property = expression_->AsProperty();
ASSERT(property != NULL);
cgen_->CodeForSourcePosition(property->position());
- cgen_->EmitKeyedStore(property->key()->type());
+ cgen_->EmitKeyedStore(property->key()->type(), wb_info);
frame->EmitPush(r0);
set_unloaded();
break;
@@ -7170,22 +7278,42 @@
// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Label* possible_strings,
+ Label* not_both_strings) {
// r2 is object type of r0.
// Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ Label object_test;
ASSERT(kSymbolTag != 0);
+ __ tst(r2, Operand(kIsNotStringMask));
+ __ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, slow);
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ b(eq, possible_strings);
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, not_both_strings);
__ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, slow);
+ __ b(eq, possible_strings);
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
__ mov(r0, Operand(1)); // Non-zero indicates not equal.
__ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&object_test);
+ __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, not_both_strings);
+ __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, not_both_strings);
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ and_(r0, r2, Operand(r3));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ mov(pc, Operand(lr)); // Return.
}
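
The object_test tail above answers non-strict equality for two JS objects using one bit from each map: the objects compare equal exactly when both are undetectable (both behave like undefined). A small C++ sketch of the bit trick, with an illustrative bit position:

#include <cstdint>

constexpr uint32_t kIsUndetectableBit = 1u << 4;   // stand-in for 1 << Map::kIsUndetectable

// Returns 0 ("equal") iff both bit fields have the undetectable bit set,
// non-zero ("not equal") otherwise -- the same and/and/eor sequence as above.
constexpr uint32_t CompareUndetectable(uint32_t map_bits_lhs, uint32_t map_bits_rhs) {
  return ((map_bits_lhs & map_bits_rhs) & kIsUndetectableBit) ^ kIsUndetectableBit;
}
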
@@ -7301,7 +7429,8 @@
void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
+ __ add(offset_, object_, Operand(offset_));
+ __ RecordWriteHelper(object_, offset_, scratch_);
__ Ret();
}
@@ -7398,9 +7527,10 @@
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
if (cc_ == eq && !strict_) {
- // Either jumps to slow or returns the answer. Assumes that r2 is the type
- // of r0 on entry.
- EmitCheckForSymbols(masm, &flat_string_check);
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that r2 is the type of r0 on entry.
+ EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
@@ -7913,7 +8043,11 @@
// The code below for writing into heap numbers isn't capable of writing
// the register as an unsigned int so we go to slow case if we hit this
// case.
- __ b(mi, &slow);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi, &result_not_a_smi);
+ } else {
+ __ b(mi, &slow);
+ }
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
@@ -7957,10 +8091,24 @@
// result.
__ mov(r0, Operand(r5));
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ if (op_ == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
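
The VFP path above picks vcvt_f64_u32 only for Token::SHR because the 32-bit result of an unsigned shift can have its top bit set and must still become a non-negative double; the other bitwise results are signed. A two-line C++ illustration:

#include <cstdint>

double ShrResultToDouble(int32_t raw) {
  return static_cast<double>(static_cast<uint32_t>(raw));   // vcvt_f64_u32
}

double SignedBitopResultToDouble(int32_t raw) {
  return static_cast<double>(raw);                          // vcvt_f64_s32
}
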
@@ -8809,12 +8957,21 @@
__ mov(r0, Operand(r2));
}
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
} else {
UNIMPLEMENTED();
}
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 2d8a935..855723d 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,6 +44,7 @@
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
+enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -------------------------------------------------------------------------
@@ -100,7 +101,7 @@
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
+ void SetValue(InitState init_state, WriteBarrierCharacter wb);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards get then dup it now. Otherwise mark
@@ -384,7 +385,7 @@
// Store a keyed property. Key and receiver are on the stack and the value is
// in r0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type);
+ void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
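
The new WriteBarrierCharacter enum is a static hint about the value being stored, derived from the assignment's right-hand side as in the codegen-arm.cc hunks above. A minimal sketch of that derivation (the Expression predicates are illustrative stand-ins, not V8's AST API):

enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };

struct Expression {
  bool is_literal;      // literal values are not freshly allocated new-space objects
  bool is_likely_smi;   // static type feedback
};

WriteBarrierCharacter ChooseWriteBarrierHint(const Expression& value) {
  if (value.is_literal) return NEVER_NEWSPACE;             // store never needs the barrier
  return value.is_likely_smi ? LIKELY_SMI : UNLIKELY_SMI;
}
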
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index fb17d45..a52417b 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1047,7 +1047,14 @@
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ // vmov register to register.
+ if (instr->SzField() == 0x1) {
+ Format(instr, "vmov.f64'cond 'Dd, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 0af1036..97e6148 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -64,12 +64,12 @@
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register t0,
- Register t1,
- Label* miss) {
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register t0,
+ Register t1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
@@ -105,33 +105,16 @@
}
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
-
- Label done;
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
@@ -170,16 +153,56 @@
__ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip));
if (i != kProbes - 1) {
- __ b(eq, &done);
+ __ b(eq, done);
} else {
__ b(ne, miss);
}
}
+}
- // Check that the value is a normal property.
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
__ bind(&done); // scratch2 == elements + 4 * index
- __ ldr(scratch1,
- FieldMemOperand(scratch2, kElementsStartOffset + 2 * kPointerSize));
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
@@ -189,6 +212,63 @@
}
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
+ __ b(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1);
+}
+
+
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
@@ -560,7 +640,7 @@
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateDictionaryLoadReceiverCheck(masm, r1, r0, r3, r4, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
// r0: elements
// Search the dictionary - put result in register r1.
@@ -815,7 +895,7 @@
// -----------------------------------
Label miss;
- GenerateDictionaryLoadReceiverCheck(masm, r0, r1, r3, r4, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
// r1: elements
GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
@@ -2138,6 +2218,27 @@
}
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
+ __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
+ GenerateMiss(masm);
+}
+
+
#undef __
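
GenerateStringDictionaryProbes above emits a bounded number of probes into an open-addressed, power-of-two-sized table, comparing the stored key against |name| at each probe and jumping to the miss label if none matches. A C++ sketch of that lookup loop (the probe-offset formula and entry layout are illustrative, not StringDictionary's exact scheme):

#include <cstdint>
#include <string>
#include <vector>

struct Entry { std::string key; int value; };

// Returns the matching index, or -1 for a miss after |max_probes| probes.
// |table| must have a power-of-two size.
int Probe(const std::vector<Entry>& table, const std::string& name,
          uint32_t hash, int max_probes) {
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + static_cast<uint32_t>(i) * (i + 1) / 2) & mask;  // quadratic step
    if (table[index].key == name) return static_cast<int>(index);
  }
  return -1;                                               // jump to the miss label
}
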
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 630e0b8..81fc11e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -310,32 +310,28 @@
void MacroAssembler::RecordWriteHelper(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
+ Register address,
+ Register scratch) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch1, ne, &not_in_new_space);
+ InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
- // Add offset into the object.
- add(scratch0, object, offset);
-
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
- Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
- str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch, scratch, Operand(ip, LSL, address));
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
@@ -368,8 +364,11 @@
// region marks for new space pages.
InNewSpace(object, scratch0, eq, &done);
+ // Add offset into the object.
+ add(scratch0, object, offset);
+
// Record the actual write.
- RecordWriteHelper(object, offset, scratch0, scratch1);
+ RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
@@ -383,6 +382,38 @@
}
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, address, scratch);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
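
The reworked RecordWriteHelper takes the already-computed store address instead of an (object, offset) pair, then marks the covering region dirty: clear the page-offset bits of the object pointer to find the page, extract the region number from the address, and set that bit in the page's dirty-region word. A C++ sketch of the arithmetic (the page and region sizes are assumptions; 8K pages with 256-byte regions give the 32 region bits a single word can hold):

#include <cstdint>

constexpr uint32_t kPageSizeBits = 13;   // assumed 8K pages
constexpr uint32_t kRegionSizeLog2 = 8;  // assumed 256-byte regions

uint32_t MarkRegionDirty(uint32_t object, uint32_t address, uint32_t dirty_bits) {
  uint32_t page = object & ~((1u << kPageSizeBits) - 1);               // Bfc(object, 0, kPageSizeBits)
  uint32_t region = (address >> kRegionSizeLog2) &
                    ((1u << (kPageSizeBits - kRegionSizeLog2)) - 1);   // Ubfx(address, ...)
  (void)page;  // the generated code loads/stores the word at page + kDirtyFlagOffset
  return dirty_bits | (1u << region);                                  // orr(scratch, scratch, ip LSL region)
}
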
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index c3f45a6..d57c565 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -137,22 +137,32 @@
Label* branch);
- // For the page containing |object| mark the region covering [object+offset]
+ // For the page containing |object| mark the region covering [address]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1);
+ Register address,
+ Register scratch);
- // For the page containing |object| mark the region covering [object+offset]
- // dirty. The object address must be in the first 8K of an allocated page.
- // The 'scratch' registers are used in the implementation and all 3 registers
- // are clobbered by the operation, as well as the ip register.
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the ip register. RecordWrite updates the
+ // write barrier even when storing smis.
void RecordWrite(Register object,
Operand offset,
Register scratch0,
Register scratch1);
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+ // as well as the ip register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
+
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6240cd4..f09ce00 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2276,7 +2276,14 @@
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ // vmov register to register.
+ if (instr->SzField() == 0x1) {
+ set_d_register_from_double(vd, get_double_from_d_register(vm));
+ } else {
+ UNREACHABLE(); // Not used by V8.
+ }
+ } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 3e5ba11..0e649cc 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -741,7 +741,8 @@
Register scratch,
String* name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ Register extra) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
diff --git a/src/builtins.cc b/src/builtins.cc
index c8d4e09..ad52ea1 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1263,6 +1263,11 @@
}
+static void Generate_StoreIC_Normal(MacroAssembler* masm) {
+ StoreIC::GenerateNormal(masm);
+}
+
+
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
}
diff --git a/src/builtins.h b/src/builtins.h
index 1fab375..3dcab62 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -98,6 +98,7 @@
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
+ V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
diff --git a/src/date.js b/src/date.js
index e780cb8..83fca27 100644
--- a/src/date.js
+++ b/src/date.js
@@ -347,9 +347,10 @@
function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
- year = TO_INTEGER(year);
- month = TO_INTEGER(month);
- date = TO_INTEGER(date);
+ // Convert to integer and map -0 to 0.
+ year = TO_INTEGER_MAP_MINUS_ZERO(year);
+ month = TO_INTEGER_MAP_MINUS_ZERO(month);
+ date = TO_INTEGER_MAP_MINUS_ZERO(date);
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth ||
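
The MakeDay change addresses issue 736 by converting each component with a ToInteger variant that maps -0 to +0, so a component of -0 behaves exactly like 0. A C++ sketch of one way to express that normalization (not V8's actual macro):

#include <cmath>

double ToIntegerMapMinusZero(double x) {
  double i = std::trunc(x);   // MakeDay has already rejected non-finite inputs
  return i + 0.0;             // under round-to-nearest, -0.0 + 0.0 == +0.0
}
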
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index c808c87..47a3c8e 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -2070,6 +2070,7 @@
return response.failed('Missing arguments');
}
var script_id = request.arguments.script_id;
+ var preview_only = !!request.arguments.preview_only;
var scripts = %DebugGetLoadedScripts();
@@ -2092,18 +2093,9 @@
var new_source = request.arguments.new_source;
- try {
- Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
- } catch (e) {
- if (e instanceof Debug.LiveEdit.Failure) {
- // Let's treat it as a "success" so that body with change_log will be
- // sent back. "change_log" will have "failure" field set.
- change_log.push( { failure: true, message: e.toString() } );
- } else {
- throw e;
- }
- }
- response.body = {change_log: change_log};
+ var result_description = Debug.LiveEdit.SetScriptSource(the_script,
+ new_source, preview_only, change_log);
+ response.body = {change_log: change_log, result: result_description};
};
diff --git a/src/debug.cc b/src/debug.cc
index d513b31..1dc6275 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -684,6 +684,12 @@
void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+ // We need to clear all breakpoints associated with the function to restore
+ // original code and avoid patching the code twice later because
+ // the function will live in the heap until next gc, and can be found by
+ // Runtime::FindSharedFunctionInfoInScript.
+ BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+ it.ClearAllDebugBreak();
RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
node = Debug::debug_info_list_;
@@ -854,7 +860,7 @@
HandleScope scope;
ASSERT(args.length() == 0);
- thread_local_.frames_are_dropped_ = false;
+ thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it;
@@ -932,12 +938,22 @@
PrepareStep(step_action, step_count);
}
- if (thread_local_.frames_are_dropped_) {
- // We must have been calling IC stub. Do not return there anymore.
+ if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
+ SetAfterBreakTarget(frame);
+ } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_IC_CALL) {
+ // We must have been calling IC stub. Do not go there anymore.
Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
+ } else if (thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
+ // Debug break slot stub does not return normally, instead it manually
+ // cleans the stack and jumps. We should patch the jump address.
+ Code* plain_return = Builtins::builtin(Builtins::FrameDropper_LiveEdit);
+ thread_local_.after_break_target_ = plain_return->entry();
+ } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_DIRECT_CALL) {
+ // Nothing to do, after_break_target is not used here.
} else {
- SetAfterBreakTarget(frame);
+ UNREACHABLE();
}
return Heap::undefined_value();
@@ -1749,8 +1765,9 @@
}
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
- thread_local_.frames_are_dropped_ = true;
+void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ FrameDropMode mode) {
+ thread_local_.frame_drop_mode_ = mode;
thread_local_.break_frame_id_ = new_break_frame_id;
}
diff --git a/src/debug.h b/src/debug.h
index 6019294..fb92692 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -400,7 +400,22 @@
// Called from stub-cache.cc.
static void GenerateCallICDebugBreak(MacroAssembler* masm);
- static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id);
+ // Describes how exactly a frame has been dropped from stack.
+ enum FrameDropMode {
+ // No frame has been dropped.
+ FRAMES_UNTOUCHED,
+ // The top JS frame had been calling IC stub. IC stub mustn't be called now.
+ FRAME_DROPPED_IN_IC_CALL,
+ // The top JS frame had been calling debug break slot stub. Patch the
+ // address this stub jumps to in the end.
+ FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
+ // The top JS frame had been calling some C++ function. The return address
+ // gets patched automatically.
+ FRAME_DROPPED_IN_DIRECT_CALL
+ };
+
+ static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ FrameDropMode mode);
static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code);
@@ -471,8 +486,9 @@
// Storage location for jump when exiting debug break calls.
Address after_break_target_;
- // Indicates that LiveEdit has patched the stack.
- bool frames_are_dropped_;
+ // Stores how LiveEdit has patched the stack. It is used when the
+ // debugger returns control back to the user script.
+ FrameDropMode frame_drop_mode_;
// Top debugger entry.
EnterDebugger* debugger_entry_;
diff --git a/src/globals.h b/src/globals.h
index 6cf2626..aea8858 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -463,6 +463,12 @@
};
+enum InlineCacheHolderFlag {
+ OWN_MAP, // For fast properties objects.
+ PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
+};
+
+
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 1a847c1..fa09dd8 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -11656,7 +11656,7 @@
void CompareStub::Generate(MacroAssembler* masm) {
- Label call_builtin, done;
+ Label check_unequal_objects, done;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -11689,15 +11689,15 @@
Label heap_number;
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
- if (cc_ == equal) {
- __ j(equal, &heap_number);
- // Identical objects are equal for operators ==, !=, and ===.
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- } else {
- // Identical objects must call ToPrimitive for <, <=, >, and >=.
- __ j(not_equal, &not_identical);
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
}
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if
// it's not NaN.
@@ -11734,79 +11734,75 @@
__ bind(&not_identical);
}
- if (cc_ == equal) { // Both strict and non-strict.
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
Label slow; // Fallthrough label.
-
+ Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
- __ j(not_zero, &not_smis);
- // One operand is a smi.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+ __ j(not_zero, &not_smis);
+ // One operand is a smi.
- // Check whether the non-smi is a heap number.
- ASSERT_EQ(1, kSmiTagMask);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
+ // Check whether the non-smi is a heap number.
+ ASSERT_EQ(1, kSmiTagMask);
+ // ecx still holds eax & kSmiTag, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
- __ bind(&not_smis);
- }
+ __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different
+ // There is no test for undetectability in strict equality.
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
+ // Fall through to the general case.
__ bind(&slow);
}
@@ -11893,7 +11889,8 @@
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -11906,7 +11903,44 @@
__ Abort("Unexpected fall-through from string comparison");
#endif
- __ bind(&call_builtin);
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagMask);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ j(below, &not_both_objects);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+ __ ret(2 * kPointerSize); // eax, edx were pushed
+ __ bind(&not_both_objects);
+ }
+
// must swap argument order
__ pop(ecx);
__ pop(edx);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 62f878c..062f0f2 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -61,11 +61,11 @@
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
@@ -98,36 +98,17 @@
}
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@@ -160,14 +141,61 @@
__ cmp(name, Operand(elements, r0, times_4,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
- __ j(equal, &done, taken);
+ __ j(equal, done, taken);
} else {
- __ j(not_equal, miss_label, not_taken);
+ __ j(not_equal, miss, not_taken);
}
}
+}
- // Check that the value is a normal property.
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not a symbol, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - used for the index into the property dictionary
+ //
+ // r1 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
+
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
__ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
@@ -179,6 +207,69 @@
}
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not a symbol, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register value,
+ Register r0,
+ Register r1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // r0 - used for index into the property dictionary and is clobbered.
+ //
+ // r1 - used to hold the capacity of the property dictionary and is clobbered.
+ Label done;
+
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label, not_taken);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(Operand(r0, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ mov(r1, value);
+ __ RecordWrite(elements, r0, r1);
+}
+
+
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
@@ -1238,7 +1329,7 @@
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- GenerateDictionaryLoadReceiverCheck(masm, edx, eax, ebx, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
// eax: elements
// Search the dictionary placing the result in edi.
@@ -1517,7 +1608,7 @@
// -----------------------------------
Label miss;
- GenerateDictionaryLoadReceiverCheck(masm, eax, edx, ebx, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
// edx: elements
// Search the dictionary placing the result in eax.
@@ -1775,6 +1866,36 @@
}
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Label miss, restore_miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
+
+ // A lot of registers are needed for storing to slow case
+ // objects. Push and restore receiver but rely on
+ // GenerateDictionaryStore preserving the value and name.
+ __ push(edx);
+ GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
+ __ Drop(1);
+ __ IncrementCounter(&Counters::store_normal_hit, 1);
+ __ ret(0);
+
+ __ bind(&restore_miss);
+ __ pop(edx);
+
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::store_normal_miss, 1);
+ GenerateMiss(masm);
+}
+
+
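At the JavaScript level, the new StoreIC::GenerateNormal path covers stores to
objects whose properties already live in a property dictionary (slow
properties). A hedged illustration; the note about deletion forcing dictionary
mode describes typical V8 behavior of this era, not something guaranteed by
this patch:

    var o = { a: 1, b: 2 };
    delete o.b;   // deleting a property typically switches o to slow (dictionary) properties
    o.a = 3;      // a store to an existing, writable data property can now be handled by
                  // GenerateNormal instead of always missing into the runtime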
// Defined in ic.cc.
Object* KeyedStoreIC_Miss(Arguments args);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b83f9bc..b3f7c21 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -98,11 +98,6 @@
}
-// For page containing |object| mark region covering [object+offset] dirty.
-// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
-// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object, int offset,
Register value, Register scratch) {
// The compiled code assumes that record write doesn't change the
@@ -153,6 +148,39 @@
}
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are esi.
+ ASSERT(!object.is(esi) && !value.is(esi) && !address.is(esi));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+
+ InNewSpace(object, value, equal, &done);
+
+ RecordWriteHelper(object, address, value);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(address, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
cmp(esp,
Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
@@ -514,97 +542,6 @@
}
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between scratch and the other
- // registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- mov(Operand(esp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- while (object != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- JSObject* prototype = JSObject::cast(object->GetPrototype());
- if (Heap::InNewSpace(prototype)) {
- // Get the map of the current object.
- mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
- // Branch on the result of the map check.
- j(not_equal, miss, not_taken);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- // Branch on the result of the map check.
- j(not_equal, miss, not_taken);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- mov(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- mov(Operand(esp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- object = prototype;
- }
-
- // Check the holder map.
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(holder->map())));
- j(not_equal, miss, not_taken);
-
- // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(object == holder);
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- return reg;
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2018721..02cfd4d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -73,16 +73,27 @@
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
- // For page containing |object| mark region covering [object+offset] dirty.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as a Smi.
- // All registers are clobbered by the operation.
+ // For page containing |object| mark region covering [object+offset]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. If offset is zero, then the scratch register
+ // contains the array index into the elements array represented as a
+ // Smi. All registers are clobbered by the operation. RecordWrite
+ // filters out smis so it does not update the write barrier if the
+ // value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
+ // For page containing |object| mark region covering |address|
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. All registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update the
+ // write barrier if the value is a smi.
+ void RecordWrite(Register object,
+ Register address,
+ Register value);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@@ -233,24 +244,6 @@
// ---------------------------------------------------------------------------
// Inline caching support
- // Generates code that verifies that the maps of objects in the
- // prototype chain of object hasn't changed since the code was
- // generated and branches to the miss label if any map has. If
- // necessary the function also generates code for security check
- // in case of global object holders. The scratch and holder
- // registers are always clobbered, but the object register is only
- // clobbered if it the same as the holder register. The function
- // returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [esp + kPointerSize].
- Register CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss);
-
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index bab0435..26361d1 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -101,6 +101,110 @@
}
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register extra) {
+ ASSERT(name->IsSymbol());
+ __ IncrementCounter(&Counters::negative_lookups, 1);
+ __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+
+ Label done;
+ __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ test(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ j(not_zero, miss_label, not_taken);
+
+ __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ j(below, miss_label, not_taken);
+
+ // Load properties array.
+ Register properties = r0;
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+  // If the names in the first kProbes - 1 probed slots for the hash value all
+  // differ from the name, and the kProbes-th probed slot is unused (its name is
+  // the undefined value), the hash table is guaranteed not to contain the
+  // property. This holds even if some slots represent deleted properties
+  // (their names are the null value).
+ for (int i = 0; i < kProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (extra.is(no_reg)) {
+ __ push(receiver);
+ }
+ Register index = extra.is(no_reg) ? receiver : extra;
+ // Capacity is smi 2^n.
+ __ mov(index, FieldOperand(properties, kCapacityOffset));
+ __ dec(index);
+ __ and_(Operand(index),
+ Immediate(Smi::FromInt(name->Hash() +
+ StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+
+ Register entity_name = extra.is(no_reg) ? properties : extra;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(entity_name, Factory::undefined_value());
+ if (extra.is(no_reg)) {
+ // 'receiver' shares a register with 'entity_name'.
+ __ pop(receiver);
+ }
+ if (i != kProbes - 1) {
+ __ j(equal, &done, taken);
+
+      // Stop if the property was found.
+ __ cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss_label, not_taken);
+
+ if (extra.is(no_reg)) {
+ // Restore the properties if their register was occupied by the name.
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ }
+ } else {
+      // Give up probing if the undefined value has still not been found.
+ __ j(not_equal, miss_label, not_taken);
+ }
+ }
+
+ __ bind(&done);
+ __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+}
+
+
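The unrolled loop above probes the dictionary at (hash + i + i*i) & mask for
i = 0..3. If the early probes only hit other symbols and the last probed slot
holds the undefined value, the property is provably absent. A self-contained
sketch of that decision (the hash and dictionary layout are invented for
illustration; the real code works on smi-encoded capacities and symbol hashes):

    // Returns false only when the name is provably absent, mirroring the
    // split between &done (absent) and miss_label (present or inconclusive).
    function mayContain(dict, name, hash) {
      var kProbes = 4;
      var mask = dict.keys.length - 1;             // capacity is a power of two
      for (var i = 0; i < kProbes; i++) {
        var index = (hash + i + i * i) & mask;     // quadratic probe sequence
        var key = dict.keys[index];
        if (key === undefined) return false;       // empty slot: definitely absent
        if (key === name) return true;             // found the property: miss
        // another symbol, or a deleted entry (null key): keep probing
      }
      return true;                                 // give up: fall back to the full check
    }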
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -723,6 +827,33 @@
}
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static Object* GenerateCheckPropertyCells(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ JSObject* current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
+ Object* cell = GenerateCheckPropertyCell(masm,
+ GlobalObject::cast(current),
+ name,
+ scratch,
+ miss);
+ if (cell->IsFailure()) {
+ return cell;
+ }
+ }
+ ASSERT(current->IsJSObject());
+ current = JSObject::cast(current->GetPrototype());
+ }
+ return NULL;
+}
+
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -733,33 +864,129 @@
Register holder_reg,
Register scratch,
String* name,
- int push_at_depth,
- Label* miss) {
- // Check that the maps haven't changed.
- Register result =
- masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
- push_at_depth, miss);
+ int save_at_depth,
+ Label* miss,
+ Register extra) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+ ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch));
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ JSObject* current = object;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ mov(Operand(esp, kPointerSize), reg);
+ }
+
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ ASSERT(current->GetPrototype()->IsJSObject());
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ Object* lookup_result = Heap::LookupSymbol(name);
+ if (lookup_result->IsFailure()) {
+ set_failure(Failure::cast(lookup_result));
+ return reg;
+ } else {
+ name = String::cast(lookup_result);
+ }
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch,
+ extra);
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // from now the object is in holder_reg
+ __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ } else if (Heap::InNewSpace(prototype)) {
+ // Get the map of the current object.
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ cmp(Operand(scratch), Immediate(Handle<Map>(current->map())));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss, not_taken);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch, miss);
+
+ // Restore scratch register to be the map of the object.
+ // We load the prototype from the map in the scratch register.
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ } else {
+ // Check the map of the current object.
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(current->map())));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss, not_taken);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch, miss);
+ }
+ // The prototype is in old space; load it directly.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ mov(reg, Handle<JSObject>(prototype));
+ }
+
+ if (save_at_depth == depth) {
+ __ mov(Operand(esp, kPointerSize), reg);
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+ ASSERT(current == holder);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(holder->map())));
+ __ j(not_equal, miss, not_taken);
+
+ // Perform security check for access to the global object.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ if (holder->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch, miss);
+  }
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- while (object != holder) {
- if (object->IsGlobalObject()) {
- Object* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(object),
- name,
- scratch,
- miss);
- if (cell->IsFailure()) {
- set_failure(Failure::cast(cell));
- return result;
- }
- }
- object = JSObject::cast(object->GetPrototype());
- }
+ Object* result = GenerateCheckPropertyCells(masm(),
+ object,
+ holder,
+ name,
+ scratch,
+ miss);
+ if (result->IsFailure()) set_failure(Failure::cast(result));
// Return the register containing the holder.
- return result;
+ return reg;
}
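In effect, the rewritten CheckPrototypes walks the chain once and, per object,
either proves the name absent from a property dictionary (normal/slow objects)
or checks that the map is unchanged (fast-properties and global objects). A
runnable sketch of the decision, using an invented node layout
({map, expectedMap, slow, global, dict}) in place of real maps:

    function checkPrototypes(chain, name) {
      // chain[0] is the receiver; chain[chain.length - 1] is the holder.
      for (var i = 0; i < chain.length - 1; i++) {
        var node = chain[i];
        if (node.slow && !node.global) {
          // negative lookup: any entry for the name invalidates the stub
          if (Object.prototype.hasOwnProperty.call(node.dict, name)) return 'miss';
        } else if (node.map !== node.expectedMap) {
          return 'miss';                           // map check failed
        }
      }
      var holder = chain[chain.length - 1];
      return holder.map === holder.expectedMap ? 'ok' : 'miss';
    }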
@@ -1083,7 +1310,8 @@
__ j(zero, &miss, not_taken);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+ Register reg = CheckPrototypes(object, edx, holder, ebx, eax,
+ name, &miss, edi);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -1145,7 +1373,7 @@
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss);
+ eax, name, &miss, edi);
if (argc == 0) {
// Noop, return the length.
@@ -1291,7 +1519,7 @@
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss);
+ eax, name, &miss, edi);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1366,7 +1594,7 @@
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
Register receiver = ebx;
Register index = edi;
@@ -1431,7 +1659,7 @@
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
Register receiver = eax;
Register index = edi;
@@ -1536,7 +1764,7 @@
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, depth, &miss);
+ ebx, eax, name, depth, &miss, edi);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1559,7 +1787,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
}
break;
@@ -1579,7 +1807,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
}
break;
}
@@ -1600,7 +1828,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
}
break;
}
@@ -1722,7 +1950,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+ CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -1993,6 +2221,8 @@
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
+ ASSERT(last->IsGlobalObject() || last->HasFastProperties());
+
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
@@ -2140,7 +2370,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
+ CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi);
// Get the value from the cell.
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 131f77b..70bbaf8 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -80,11 +80,38 @@
}
-Map* IC::GetCodeCacheMapForObject(Object* object) {
- if (object->IsJSObject()) return JSObject::cast(object)->map();
+InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
+ JSObject* holder) {
+ if (object->IsJSObject()) {
+ return GetCodeCacheForObject(JSObject::cast(object), holder);
+ }
// If the object is a value, we use the prototype map for the cache.
ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
- return JSObject::cast(object->GetPrototype())->map();
+ return PROTOTYPE_MAP;
+}
+
+
+InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
+ JSObject* holder) {
+  // Fast-properties and global objects store stubs in their own maps.
+  // Slow-properties objects use the prototype's map, unless the property is
+  // their own (holder == object). This works because slow-properties objects
+  // that share a prototype (or a prototype with the same map) and do not have
+  // the property are interchangeable for such a stub.
+ if (holder != object &&
+ !object->HasFastProperties() &&
+ !object->IsJSGlobalProxy() &&
+ !object->IsJSGlobalObject()) {
+ return PROTOTYPE_MAP;
+ }
+ return OWN_MAP;
+}
+
+
+Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
+ Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
+ ASSERT(map_owner->IsJSObject());
+ return JSObject::cast(map_owner)->map();
}
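The rule GetCodeCacheForObject implements can be restated compactly; here is a
small JavaScript paraphrase, where the slow and global flags are placeholders
for the HasFastProperties()/IsJSGlobalObject()/IsJSGlobalProxy() checks above:

    function codeCacheFor(object, holder) {
      // Fast-properties and global objects keep stubs in their own map; other
      // dictionary-mode objects borrow the prototype's map, unless the
      // property is the object's own (holder === object).
      if (holder !== object && object.slow && !object.global) return 'PROTOTYPE_MAP';
      return 'OWN_MAP';
    }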
diff --git a/src/ic.cc b/src/ic.cc
index 4b77d92..cdb06ac 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -134,13 +134,45 @@
}
#endif
+
+static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
+ Object* receiver) {
+ Object* end = lookup->IsProperty() ? lookup->holder() : Heap::null_value();
+ for (Object* current = receiver;
+ current != end;
+ current = current->GetPrototype()) {
+ if (current->IsJSObject() &&
+ !JSObject::cast(current)->HasFastProperties() &&
+ !current->IsJSGlobalProxy() &&
+ !current->IsJSGlobalObject()) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
- Map* map = GetCodeCacheMapForObject(receiver);
+ InlineCacheHolderFlag cache_holder =
+ Code::ExtractCacheHolderFromFlags(target->flags());
+
+
+ if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
+ // The stub was generated for JSObject but called for non-JSObject.
+ // IC::GetCodeCacheMap is not applicable.
+ return MONOMORPHIC;
+ } else if (cache_holder == PROTOTYPE_MAP &&
+ receiver->GetPrototype()->IsNull()) {
+ // IC::GetCodeCacheMap is not applicable.
+ return MONOMORPHIC;
+ }
+ Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@@ -487,12 +519,24 @@
void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name) {
+ State state,
+ Handle<Object> object,
+ Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
+#ifndef V8_TARGET_ARCH_IA32
+  // Normal objects are only supported on IA32 for now.
+ if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+#else
+ if (lookup->holder() != *object &&
+ HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
+ // Suppress optimization for prototype chains with slow properties objects
+ // in the middle.
+ return;
+ }
+#endif
+
// Compute the number of arguments.
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
@@ -590,8 +634,13 @@
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
} else if (state == MEGAMORPHIC) {
+    // The map used to key the stub cache must be consistent with
+    // GenerateMonomorphicCacheProbe; it is not necessarily the map holding the stub.
+ Map* map = JSObject::cast(object->IsJSObject() ? *object :
+ object->GetPrototype())->map();
+
// Update the stub cache.
- StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
+ StubCache::Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@@ -795,6 +844,8 @@
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+
// Compute the code stub for this load.
Object* code = NULL;
if (state == UNINITIALIZED) {
@@ -836,7 +887,7 @@
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
- code = StubCache::ComputeLoadNormal(*name, *receiver);
+ code = StubCache::ComputeLoadNormal();
}
break;
}
@@ -871,8 +922,12 @@
} else if (state == MONOMORPHIC) {
set_target(megamorphic_stub());
} else if (state == MEGAMORPHIC) {
- // Update the stub cache.
- StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
+      // The map used to key the stub cache must be consistent with
+      // GenerateMonomorphicCacheProbe.
+ Map* map = JSObject::cast(object->IsJSObject() ? *object :
+ object->GetPrototype())->map();
+
+ StubCache::Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@@ -1018,6 +1073,8 @@
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+
// Compute the code stub for this load.
Object* code = NULL;
@@ -1198,16 +1255,18 @@
break;
}
case NORMAL: {
- if (!receiver->IsGlobalObject()) {
- return;
+ if (receiver->IsGlobalObject()) {
+ // The stub generated for the global object picks the value directly
+ // from the property cell. So the property must be directly on the
+ // global object.
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+ code = StubCache::ComputeStoreGlobal(*name, *global, cell);
+ } else {
+ if (lookup->holder() != *receiver) return;
+ code = StubCache::ComputeStoreNormal();
}
- // The stub generated for the global object picks the value directly
- // from the property cell. So the property must be directly on the
- // global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- code = StubCache::ComputeStoreGlobal(*name, *global, cell);
break;
}
case CALLBACKS: {
diff --git a/src/ic.h b/src/ic.h
index 738b6f4..0d5df96 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -117,9 +117,14 @@
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
- // Returns the map to use for caching stubs for a given object.
- // This method should not be called with undefined or null.
- static inline Map* GetCodeCacheMapForObject(Object* object);
+ // Determines which map must be used for keeping the code stub.
+ // These methods should not be called with undefined or null.
+ static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
+ JSObject* holder);
+ static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
+ JSObject* holder);
+ static inline Map* GetCodeCacheMap(Object* object,
+ InlineCacheHolderFlag holder);
protected:
Address fp() const { return fp_; }
@@ -384,6 +389,7 @@
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateArrayLength(MacroAssembler* masm);
+ static void GenerateNormal(MacroAssembler* masm);
private:
// Update the inline cache and the global stub cache based on the
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 34d5c0d..c8c6f08 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -51,7 +51,8 @@
// Applies the change to the script.
// The change is in form of list of chunks encoded in a single array as
// a series of triplets (pos1_start, pos1_end, pos2_end)
- function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
+ function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
+ change_log) {
var old_source = script.source;
@@ -96,7 +97,7 @@
}
// Recursively collects all newly compiled functions that are going into
- // business and should be have link to the actual script updated.
+ // business and should have link to the actual script updated.
function CollectNew(node_list) {
for (var i = 0; i < node_list.length; i++) {
link_to_original_script_list.push(node_list[i]);
@@ -121,6 +122,20 @@
}
}
+ var preview_description = {
+ change_tree: DescribeChangeTree(root_old_node),
+ textual_diff: {
+ old_len: old_source.length,
+ new_len: new_source.length,
+ chunks: diff_array
+ },
+ updated: false
+ };
+
+ if (preview_only) {
+ return preview_description;
+ }
+
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code need to be patched.
@@ -132,13 +147,15 @@
}
}
- // Check that function being patched is not currently on stack.
- CheckStackActivations(replaced_function_infos, change_log);
-
-
// We haven't changed anything before this line yet.
// Committing all changes.
+ // Check that function being patched is not currently on stack or drop them.
+ var dropped_functions_number =
+ CheckStackActivations(replaced_function_infos, change_log);
+
+ preview_description.stack_modified = dropped_functions_number != 0;
+
// Start with breakpoints. Convert their line/column positions and
// temporary remove.
var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
@@ -166,6 +183,8 @@
LinkToOldScript(link_to_old_script_list[i], old_script,
link_to_old_script_report);
}
+
+ preview_description.created_script_name = old_script_name;
}
// Link to an actual script all the functions that we are going to use.
@@ -189,6 +208,9 @@
}
break_points_restorer(pos_translator, old_script);
+
+ preview_description.updated = true;
+ return preview_description;
}
// Function is public.
this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
@@ -494,6 +516,16 @@
this.new_end_pos = void 0;
this.corresponding_node = void 0;
this.unmatched_new_nodes = void 0;
+
+  // 'Textual' correspondence/matching is weaker than 'pure'
+  // correspondence/matching. We need the 'textual' level for visual
+  // presentation in the UI; we use the 'pure' level for actual code
+  // manipulation. Sometimes only the function body is changed (the old and
+  // new functions textually correspond), but we cannot patch the code, so we
+  // treat them as an old function deleted and a new function created.
+ this.textual_corresponding_node = void 0;
+ this.textually_unmatched_new_nodes = void 0;
+
this.live_shared_info_wrapper = void 0;
}
@@ -640,6 +672,7 @@
var new_children = new_node.children;
var unmatched_new_nodes_list = [];
+ var textually_unmatched_new_nodes_list = [];
var old_index = 0;
var new_index = 0;
@@ -650,6 +683,7 @@
if (new_children[new_index].info.start_position <
old_children[old_index].new_start_pos) {
unmatched_new_nodes_list.push(new_children[new_index]);
+ textually_unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
} else if (new_children[new_index].info.start_position ==
old_children[old_index].new_start_pos) {
@@ -657,6 +691,8 @@
old_children[old_index].new_end_pos) {
old_children[old_index].corresponding_node =
new_children[new_index];
+ old_children[old_index].textual_corresponding_node =
+ new_children[new_index];
if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
ProcessChildren(old_children[old_index],
new_children[new_index]);
@@ -673,6 +709,7 @@
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
unmatched_new_nodes_list.push(new_children[new_index]);
+ textually_unmatched_new_nodes_list.push(new_children[new_index]);
}
new_index++;
old_index++;
@@ -694,21 +731,28 @@
while (new_index < new_children.length) {
unmatched_new_nodes_list.push(new_children[new_index]);
+ textually_unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
}
if (old_node.status == FunctionStatus.CHANGED) {
- if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
+ var why_wrong_expectations =
+ WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
+ if (why_wrong_expectations) {
old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = "Changed code expectations";
+ old_node.status_explanation = why_wrong_expectations;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
+ old_node.textually_unmatched_new_nodes =
+ textually_unmatched_new_nodes_list;
}
ProcessChildren(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
+ old_code_tree.textual_corresponding_node = new_code_tree;
+
Assert(old_code_tree.status != FunctionStatus.DAMAGED,
"Script became damaged");
}
@@ -792,27 +836,37 @@
}
// Compares a function interface old and new version, whether it
- // changed or not.
- function CompareFunctionExpectations(function_info1, function_info2) {
+  // changed or not. Returns an explanation if they differ.
+ function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
// Check that function has the same number of parameters (there may exist
// an adapter, that won't survive function parameter number change).
if (function_info1.param_num != function_info2.param_num) {
- return false;
+ return "Changed parameter number: " + function_info1.param_num +
+ " and " + function_info2.param_num;
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
-
- if (!scope_info1) {
- return !scope_info2;
+
+ var scope_info1_text;
+ var scope_info2_text;
+
+ if (scope_info1) {
+ scope_info1_text = scope_info1.toString();
+ } else {
+ scope_info1_text = "";
}
-
- if (scope_info1.length != scope_info2.length) {
- return false;
+ if (scope_info2) {
+ scope_info2_text = scope_info2.toString();
+ } else {
+ scope_info2_text = "";
}
-
- // Check that outer scope structure is not changed. Otherwise the function
- // will not properly work with existing scopes.
- return scope_info1.toString() == scope_info2.toString();
+
+ if (scope_info1_text != scope_info2_text) {
+ return "Incompatible variable maps: [" + scope_info1_text +
+ "] and [" + scope_info2_text + "]";
+ }
+ // No differences. Return undefined.
+ return;
}
// Minifier forward declaration.
@@ -856,6 +910,8 @@
change_log.push( { functions_on_stack: problems } );
throw new Failure("Blocked by functions on stack");
}
+
+ return dropped.length;
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
@@ -897,14 +953,11 @@
this.GetPcFromSourcePos = GetPcFromSourcePos;
// LiveEdit main entry point: changes a script text to a new string.
- function SetScriptSource(script, new_source, change_log) {
+ function SetScriptSource(script, new_source, preview_only, change_log) {
var old_source = script.source;
var diff = CompareStringsLinewise(old_source, new_source);
- if (diff.length == 0) {
- change_log.push( { empty_diff: true } );
- return;
- }
- ApplyPatchMultiChunk(script, diff, new_source, change_log);
+ return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
+ change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;
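With the new preview_only parameter, a caller can ask for a description of what
the patch would do without committing it. A hypothetical driver, assuming this
module is exposed to the debugger as Debug.LiveEdit and that confirmWithUser is
a UI hook supplied by the caller (the shape of the returned object follows
preview_description and DescribeChangeTree above):

    var change_log = [];
    var preview = Debug.LiveEdit.SetScriptSource(script, new_source,
                                                 true /* preview_only */,
                                                 change_log);
    // Nothing has been patched yet: preview.updated is false.
    // preview.textual_diff is { old_len, new_len, chunks } and
    // preview.change_tree describes per-function status and positions.
    if (confirmWithUser(preview.change_tree)) {
      Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
    }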
@@ -931,7 +984,67 @@
return ApplyPatchMultiChunk(script,
[ change_pos, change_pos + change_len, change_pos + new_str.length],
- new_source, change_log);
+ new_source, false, change_log);
+ }
+
+ // Creates JSON description for a change tree.
+ function DescribeChangeTree(old_code_tree) {
+
+ function ProcessOldNode(node) {
+ var child_infos = [];
+ for (var i = 0; i < node.children.length; i++) {
+ var child = node.children[i];
+ if (child.status != FunctionStatus.UNCHANGED) {
+ child_infos.push(ProcessOldNode(child));
+ }
+ }
+ var new_child_infos = [];
+ if (node.textually_unmatched_new_nodes) {
+ for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
+ var child = node.textually_unmatched_new_nodes[i];
+ new_child_infos.push(ProcessNewNode(child));
+ }
+ }
+ var res = {
+ name: node.info.function_name,
+ positions: DescribePositions(node),
+ status: node.status,
+ children: child_infos,
+ new_children: new_child_infos
+ };
+ if (node.status_explanation) {
+ res.status_explanation = node.status_explanation;
+ }
+ if (node.textual_corresponding_node) {
+ res.new_positions = DescribePositions(node.textual_corresponding_node);
+ }
+ return res;
+ }
+
+ function ProcessNewNode(node) {
+ var child_infos = [];
+ // Do not list ancestors.
+ if (false) {
+ for (var i = 0; i < node.children.length; i++) {
+ child_infos.push(ProcessNewNode(node.children[i]));
+ }
+ }
+ var res = {
+ name: node.info.function_name,
+ positions: DescribePositions(node),
+        children: child_infos
+ };
+ return res;
+ }
+
+ function DescribePositions(node) {
+ return {
+ start_position: node.info.start_position,
+ end_position: node.info.end_position
+ };
+ }
+
+ return ProcessOldNode(old_code_tree);
}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 950f8e0..04631a3 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1187,7 +1187,12 @@
// Returns error message or NULL.
static const char* DropFrames(Vector<StackFrame*> frames,
int top_frame_index,
- int bottom_js_frame_index) {
+ int bottom_js_frame_index,
+ Debug::FrameDropMode* mode) {
+ if (Debug::kFrameDropperFrameSize < 0) {
+ return "Stack manipulations are not supported in this architecture.";
+ }
+
StackFrame* pre_top_frame = frames[top_frame_index - 1];
StackFrame* top_frame = frames[top_frame_index];
StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
@@ -1198,12 +1203,18 @@
if (pre_top_frame->code()->is_inline_cache_stub() &&
pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
+ *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
+ } else if (pre_top_frame->code() == Debug::debug_break_slot()) {
+ // OK, we can drop debug break slot.
+ *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame->code() ==
Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
// OK, we can drop our own code.
+ *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else if (pre_top_frame->code()->kind() == Code::STUB &&
pre_top_frame->code()->major_key()) {
- // Unit Test entry, it's fine, we support this case.
+ // Entry from our unit tests, it's fine, we support this case.
+ *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else {
return "Unknown structure of stack above changing function";
}
@@ -1316,8 +1327,9 @@
return NULL;
}
+ Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
const char* error_message = DropFrames(frames, top_frame_index,
- bottom_js_frame_index);
+ bottom_js_frame_index, &drop_mode);
if (error_message != NULL) {
return error_message;
@@ -1331,7 +1343,7 @@
break;
}
}
- Debug::FramesHaveBeenDropped(new_id);
+ Debug::FramesHaveBeenDropped(new_id, drop_mode);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
diff --git a/src/macros.py b/src/macros.py
index 7d97918..32c9651 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -120,6 +120,7 @@
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
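The new macro differs from TO_INTEGER only in canonicalizing a -0 result to +0
before it is used. The underlying JavaScript distinction it hides, for example
when the result is used as a string index:

    var t = Math.ceil(-0.4);   // truncating -0.4 toward zero yields -0
    1 / t;                     // -Infinity, so t really is -0
    // Builtins that index with the result must treat -0 exactly like 0,
    // which is what the *_MAP_MINUS_ZERO variant guarantees:
    "abc".charAt(-0.4);        // "a"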
diff --git a/src/messages.js b/src/messages.js
index a46af4a..7bac3b2 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -196,6 +196,7 @@
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",
array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
+ object_not_extensible: "Can't add property %0, object is not extensible",
illegal_access: "illegal access"
};
}
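The new message surfaces whenever adding a property to a non-extensible object
is rejected. A quick illustration of the ES5 API this release wires up (exactly
how the failure is reported depends on the calling path):

    var o = { existing: 1 };
    Object.preventExtensions(o);
    Object.isExtensible(o);        // false
    o.existing = 2;                // still fine: only *adding* properties is blocked
    // An attempted add such as o.fresh = 3 is rejected and, where the failure
    // is reported, produces:
    //   TypeError: Can't add property fresh, object is not extensible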
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f9def82..79f2c97 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1335,6 +1335,21 @@
}
+bool JSObject::HasFastProperties() {
+ return !properties()->IsDictionary();
+}
+
+
+int JSObject::MaxFastProperties() {
+ // Allow extra fast properties if the object has more than
+ // kMaxFastProperties in-object properties. When this is the case,
+ // it is very unlikely that the object is being used as a dictionary
+ // and there is a good chance that allowing more map transitions
+ // will be worth it.
+ return Max(map()->inobject_properties(), kMaxFastProperties);
+}
+
+
void Struct::InitializeBody(int object_size) {
Object* value = Heap::undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@@ -1343,11 +1358,6 @@
}
-bool JSObject::HasFastProperties() {
- return !properties()->IsDictionary();
-}
-
-
bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
@@ -2189,6 +2199,20 @@
}
+void Map::set_is_extensible(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kIsExtensible));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
+ }
+}
+
+bool Map::is_extensible() {
+ return ((1 << kIsExtensible) & bit_field2()) != 0;
+}
+
+
+
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -2263,13 +2287,15 @@
InLoopFlag in_loop,
InlineCacheState ic_state,
PropertyType type,
- int argc) {
+ int argc,
+ InlineCacheHolderFlag holder) {
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= argc << kFlagsArgumentsCountShift;
+ if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
// Cast to flags and validate result before returning it.
Flags result = static_cast<Flags>(bits);
ASSERT(ExtractKindFromFlags(result) == kind);
@@ -2283,9 +2309,10 @@
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
PropertyType type,
+ InlineCacheHolderFlag holder,
InLoopFlag in_loop,
int argc) {
- return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
+ return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc, holder);
}
@@ -2318,6 +2345,12 @@
}
+InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
+ int bits = (flags & kFlagsCacheInPrototypeMapMask);
+ return bits != 0 ? PROTOTYPE_MAP : OWN_MAP;
+}
+
+
Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
int bits = flags & ~kFlagsTypeMask;
return static_cast<Flags>(bits);
diff --git a/src/objects.cc b/src/objects.cc
index 883b28e..8288f63 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1276,7 +1276,7 @@
}
if (map()->unused_property_fields() == 0) {
- if (properties()->length() > kMaxFastProperties) {
+ if (properties()->length() > MaxFastProperties()) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return AddSlowProperty(name, value, attributes);
@@ -1386,6 +1386,11 @@
Object* value,
PropertyAttributes attributes) {
ASSERT(!IsJSGlobalProxy());
+ if (!map()->is_extensible()) {
+ Handle<Object> args[1] = {Handle<String>(name)};
+ return Top::Throw(*Factory::NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
+ }
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
@@ -1474,7 +1479,7 @@
Object* new_value,
PropertyAttributes attributes) {
if (map()->unused_property_fields() == 0 &&
- properties()->length() > kMaxFastProperties) {
+ properties()->length() > MaxFastProperties()) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return ReplaceSlowProperty(name, new_value, attributes);
@@ -1746,8 +1751,6 @@
result->DictionaryResult(this, entry);
return;
}
- // Slow case object skipped during lookup. Do not use inline caching.
- if (!IsGlobalObject()) result->DisallowCaching();
}
result->NotFound();
}
@@ -2576,6 +2579,25 @@
}
+Object* JSObject::PreventExtensions() {
+ // If there are fast elements we normalize.
+ if (HasFastElements()) {
+ NormalizeElements();
+ }
+ // Make sure that we never go back to fast case.
+ element_dictionary()->set_requires_slow_elements();
+
+  // Do a map transition; other objects with this map may still
+  // be extensible.
+ Object* new_map = map()->CopyDropTransitions();
+ if (new_map->IsFailure()) return new_map;
+ Map::cast(new_map)->set_is_extensible(false);
+ set_map(Map::cast(new_map));
+ ASSERT(!map()->is_extensible());
+ return new_map;
+}
+
+
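Because PreventExtensions copies the map (dropping transitions) before clearing
the extensible bit, other objects that still use the old map are unaffected,
and the elements are normalized into dictionary mode so indexed adds are caught
as well. A behavioral sketch:

    var a = {};
    var b = {};
    Object.preventExtensions(a);   // a gets a fresh map with is_extensible == false
    Object.isExtensible(a);        // false
    Object.isExtensible(b);        // true: b keeps the old, still-extensible map
    a[0] = 'x';                    // indexed adds are rejected too; a's elements were
                                   // switched to a (slow) dictionary above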
// Tests for the fast common case for property enumeration:
 // - This object and all prototypes have an enum cache (which means that it has
// no interceptors and needs no access checks).
@@ -3076,7 +3098,7 @@
Object* descriptors = instance_descriptors()->RemoveTransitions();
if (descriptors->IsFailure()) return descriptors;
cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
- return cast(new_map);
+ return new_map;
}
@@ -6209,6 +6231,15 @@
return value;
}
}
+ // When we set the is_extensible flag to false we always force
+  // the elements into dictionary mode (and force them to stay there).
+ if (!map()->is_extensible()) {
+ Handle<Object> number(Heap::NumberFromUint32(index));
+ Handle<String> index_string(Factory::NumberToString(number));
+ Handle<Object> args[1] = { index_string };
+ return Top::Throw(*Factory::NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
+ }
Object* result = dictionary->AtNumberPut(index, value);
if (result->IsFailure()) return result;
if (elms != FixedArray::cast(result)) {
diff --git a/src/objects.h b/src/objects.h
index 0ad6f14..15cfd5c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1367,6 +1367,7 @@
// Returns the index'th element.
// The undefined object if index is out of bounds.
Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
+ Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
Object* SetFastElementsCapacityAndLength(int capacity, int length);
Object* SetSlowElements(Object* length);
@@ -1516,6 +1517,10 @@
// Casting.
static inline JSObject* cast(Object* obj);
+  // Disallow further properties from being added to the object.
+ Object* PreventExtensions();
+
+
// Dispatched behavior.
void JSObjectIterateBody(int object_size, ObjectVisitor* v);
void JSObjectShortPrint(StringStream* accumulator);
@@ -1547,6 +1552,11 @@
#endif
Object* SlowReverseLookup(Object* value);
+ // Maximal number of fast properties for the JSObject. Used to
+ // restrict the number of map transitions to avoid an explosion in
+ // the number of maps for objects used as dictionaries.
+ inline int MaxFastProperties();
+
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
static const uint32_t kMaxElementCount = 0xffffffffu;
@@ -1568,8 +1578,6 @@
STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
- Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
-
private:
Object* GetElementWithCallback(Object* receiver,
Object* structure,
@@ -2765,11 +2773,13 @@
InLoopFlag in_loop = NOT_IN_LOOP,
InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
- int argc = -1);
+ int argc = -1,
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
PropertyType type,
+ InlineCacheHolderFlag holder = OWN_MAP,
InLoopFlag in_loop = NOT_IN_LOOP,
int argc = -1);
@@ -2778,6 +2788,7 @@
static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
+ static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
// Convert a target address into a code object.
@@ -2864,16 +2875,18 @@
static const int kFlagsICInLoopShift = 3;
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
- static const int kFlagsArgumentsCountShift = 11;
+ static const int kFlagsICHolderShift = 11;
+ static const int kFlagsArgumentsCountShift = 12;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
- static const int kFlagsArgumentsCountMask = 0xFFFFF800;
+ static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
+ static const int kFlagsArgumentsCountMask = 0xFFFFF000;
static const int kFlagsNotUsedInLookup =
- (kFlagsICInLoopMask | kFlagsTypeMask);
+ (kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
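The argument-count field moves from bit 11 to bit 12 to make room for the new
cache-holder bit. A rough JavaScript rendering of the resulting layout,
following ComputeFlags and ExtractCacheHolderFromFlags above (the enum values
and an ic-state shift of 0 are assumptions of the sketch; argc is taken to be
non-negative):

    function computeFlags(kind, inLoop, icState, type, argc, prototypeHolder) {
      var bits = (kind << 7)       // kFlagsKindShift
               | (icState & 0x7)   // kFlagsICStateMask (shift assumed to be 0)
               | (type << 4)       // kFlagsTypeShift
               | (argc << 12);     // kFlagsArgumentsCountShift (previously 11)
      if (inLoop)          bits |= 0x008;   // kFlagsICInLoopMask
      if (prototypeHolder) bits |= 0x800;   // kFlagsCacheInPrototypeMapMask (new)
      return bits;
    }
    function cacheHolderFromFlags(flags) {  // mirrors ExtractCacheHolderFromFlags
      return (flags & 0x800) !== 0 ? 'PROTOTYPE_MAP' : 'OWN_MAP';
    }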
@@ -2980,13 +2993,8 @@
return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
}
- inline void set_is_extensible() {
- set_bit_field2(bit_field2() | (1 << kIsExtensible));
- }
-
- inline bool is_extensible() {
- return ((1 << kIsExtensible) & bit_field2()) != 0;
- }
+ inline void set_is_extensible(bool value);
+ inline bool is_extensible();
// Tells whether the instance has fast elements.
void set_has_fast_elements(bool value) {
diff --git a/src/rewriter.cc b/src/rewriter.cc
index c97408e..73301b9 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -87,11 +87,13 @@
void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
+ node->expression()->set_no_negative_zero(true);
Visit(node->expression());
}
void AstOptimizer::VisitIfStatement(IfStatement* node) {
+ node->condition()->set_no_negative_zero(true);
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
@@ -101,6 +103,7 @@
void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
+ node->cond()->set_no_negative_zero(true);
Visit(node->cond());
Visit(node->body());
}
@@ -108,6 +111,7 @@
void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
has_function_literal_ = false;
+ node->cond()->set_no_negative_zero(true);
Visit(node->cond());
node->may_have_function_literal_ = has_function_literal_;
Visit(node->body());
@@ -120,6 +124,7 @@
}
if (node->cond() != NULL) {
has_function_literal_ = false;
+ node->cond()->set_no_negative_zero(true);
Visit(node->cond());
node->may_have_function_literal_ = has_function_literal_;
}
@@ -151,6 +156,7 @@
void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
+ node->tag()->set_no_negative_zero(true);
Visit(node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
CaseClause* clause = node->cases()->at(i);
@@ -444,6 +450,7 @@
if (FLAG_safe_int32_compiler) {
switch (node->op()) {
case Token::BIT_NOT:
+ node->expression()->set_no_negative_zero(true);
node->expression()->set_to_int32(true);
// Fall through.
case Token::ADD:
@@ -476,10 +483,49 @@
}
+static bool CouldBeNegativeZero(AstNode* node) {
+ Literal* literal = node->AsLiteral();
+ if (literal != NULL) {
+ Handle<Object> handle = literal->handle();
+ if (handle->IsString() || handle->IsSmi()) {
+ return false;
+ } else if (handle->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(*handle)->value();
+ if (double_value != 0) {
+ return false;
+ }
+ }
+ }
+ BinaryOperation* binary = node->AsBinaryOperation();
+ if (binary != NULL && Token::IsBitOp(binary->op())) {
+ return false;
+ }
+ return true;
+}
+
+
+static bool CouldBePositiveZero(AstNode* node) {
+ Literal* literal = node->AsLiteral();
+ if (literal != NULL) {
+ Handle<Object> handle = literal->handle();
+ if (handle->IsSmi()) {
+ if (Smi::cast(*handle) != Smi::FromInt(0)) {
+ return false;
+ }
+ } else if (handle->IsHeapNumber()) {
+ // Heap number literal can't be +0, because that's a Smi.
+ return false;
+ }
+ }
+ return true;
+}
+
+
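These predicates let the optimizer clear the negative-zero worry for an ADD
when either operand provably is not -0, and for a SUB when the left operand is
not -0 or the right operand is not +0; bitwise results and a variable
multiplied by itself are handled below. The JavaScript identities the analysis
leans on:

    1 / (-0 + -0);     // -Infinity: a sum is -0 only when both operands are -0
    1 / (0 + -0);      // Infinity
    1 / (-0 - 0);      // -Infinity: a difference is -0 only as (-0) - (+0)
    1 / (-0 | 0);      // Infinity: bitwise operators never produce -0
    var x = -0;
    1 / (x * x);       // Infinity: a number multiplied by itself is never -0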
void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
// Depending on the operation we can propagate this node's type down the
// AST nodes.
- switch (node->op()) {
+ Token::Value op = node->op();
+ switch (op) {
case Token::COMMA:
case Token::OR:
node->left()->set_no_negative_zero(true);
@@ -503,23 +549,54 @@
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(true);
break;
+ case Token::MUL: {
+ VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
+ VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
+ if (lvar_proxy != NULL && rvar_proxy != NULL) {
+ Variable* lvar = lvar_proxy->AsVariable();
+ Variable* rvar = rvar_proxy->AsVariable();
+ if (lvar != NULL && rvar != NULL) {
+ if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
+ Slot* lslot = lvar->slot();
+ Slot* rslot = rvar->slot();
+ if (lslot->type() == rslot->type() &&
+ (lslot->type() == Slot::PARAMETER ||
+ lslot->type() == Slot::LOCAL) &&
+ lslot->index() == rslot->index()) {
+ // A number squared doesn't give negative zero.
+ node->set_no_negative_zero(true);
+ }
+ }
+ }
+ }
+ }
case Token::ADD:
case Token::SUB:
- case Token::MUL:
case Token::DIV:
- case Token::MOD:
+ case Token::MOD: {
if (node->type()->IsLikelySmi()) {
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
- node->left()->set_no_negative_zero(node->no_negative_zero());
- node->right()->set_no_negative_zero(node->no_negative_zero());
+ if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
+ !CouldBeNegativeZero(node->right()))) {
+ node->left()->set_no_negative_zero(true);
+ node->right()->set_no_negative_zero(true);
+ } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
+ !CouldBePositiveZero(node->right()))) {
+ node->left()->set_no_negative_zero(true);
+ node->right()->set_no_negative_zero(true);
+ } else {
+ node->left()->set_no_negative_zero(node->no_negative_zero());
+ node->right()->set_no_negative_zero(node->no_negative_zero());
+ }
if (node->op() == Token::DIV) {
node->right()->set_no_negative_zero(false);
} else if (node->op() == Token::MOD) {
node->right()->set_no_negative_zero(true);
}
break;
+ }
default:
UNREACHABLE();
break;
@@ -530,7 +607,7 @@
// After visiting the operand nodes we have to check if this node's type
// can be updated. If it does, then we can push that information down
- // towards the leafs again if the new information is an upgrade over the
+ // towards the leaves again if the new information is an upgrade over the
// previous type of the operand nodes.
if (node->type()->IsUnknown()) {
if (node->left()->type()->IsLikelySmi() ||
@@ -590,7 +667,7 @@
void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
if (node->type()->IsKnown()) {
- // Propagate useful information down towards the leafs.
+ // Propagate useful information down towards the leaves.
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
@@ -604,7 +681,7 @@
// After visiting the operand nodes we have to check if this node's type
// can be updated. If it does, then we can push that information down
- // towards the leafs again if the new information is an upgrade over the
+ // towards the leaves again if the new information is an upgrade over the
// previous type of the operand nodes.
if (node->type()->IsUnknown()) {
if (node->left()->type()->IsLikelySmi() ||
diff --git a/src/runtime.cc b/src/runtime.cc
index 22e80b3..a3eb09f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -678,6 +678,12 @@
}
+static Object* Runtime_PreventExtensions(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ return obj->PreventExtensions();
+}
+
static Object* Runtime_IsExtensible(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
@@ -5362,9 +5368,6 @@
}
-
-
-
static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
diff --git a/src/runtime.h b/src/runtime.h
index 3d4df1b..5719fc8 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -72,6 +72,7 @@
F(GetOwnProperty, 2, 1) \
\
F(IsExtensible, 1, 1) \
+ F(PreventExtensions, 1, 1) \
\
/* Utilities */ \
F(GetFunctionDelegate, 1, 1) \
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index ffa92dd..a654a08 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -94,6 +94,7 @@
Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
+ ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
// If no global objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map
// and we use the empty string for the map cache in that case. If
@@ -129,14 +130,16 @@
JSObject* receiver,
JSObject* holder,
int field_index) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
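All of the Compute* functions in this file follow the same find-or-compile shape that
the change above makes explicit: pick the map that owns the code cache once, look up
(name, flags), and only invoke the stub compiler on a miss. A rough standalone sketch of
that shape (hypothetical types, not the real V8 classes):

#include <cstddef>
#include <map>
#include <string>
#include <utility>

struct Stub { /* stands in for compiled stub code */ };

class CodeCache {
 public:
  Stub* Find(const std::string& name, int flags) {
    std::map<std::pair<std::string, int>, Stub>::iterator it =
        cache_.find(std::make_pair(name, flags));
    return it == cache_.end() ? NULL : &it->second;
  }
  Stub* Insert(const std::string& name, int flags, const Stub& stub) {
    return &(cache_[std::make_pair(name, flags)] = stub);
  }
 private:
  std::map<std::pair<std::string, int>, Stub> cache_;
};

// Find a cached stub for (name, flags); compile and cache one on a miss.
Stub* ComputeStub(CodeCache* cache, const std::string& name, int flags) {
  if (Stub* hit = cache->Find(name, flags)) return hit;
  Stub fresh;  // stands in for running the stub compiler
  return cache->Insert(name, flags, fresh);
}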
@@ -148,14 +151,16 @@
JSObject* holder,
AccessorInfo* callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -166,15 +171,17 @@
JSObject* receiver,
JSObject* holder,
Object* value) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -184,21 +191,23 @@
Object* StubCache::ComputeLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
}
-Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
+Object* StubCache::ComputeLoadNormal() {
return Builtins::builtin(Builtins::LoadIC_Normal);
}
@@ -208,8 +217,10 @@
GlobalObject* holder,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadGlobal(receiver,
@@ -219,7 +230,7 @@
is_dont_delete);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -230,14 +241,16 @@
JSObject* receiver,
JSObject* holder,
int field_index) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -248,15 +261,17 @@
JSObject* receiver,
JSObject* holder,
Object* value) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -266,15 +281,17 @@
Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -285,15 +302,17 @@
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -305,13 +324,15 @@
JSArray* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ ASSERT(receiver->IsJSObject());
+ Map* map = receiver->map();
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -322,13 +343,14 @@
String* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Map* map = receiver->map();
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadStringLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -339,13 +361,14 @@
JSFunction* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Map* map = receiver->map();
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -371,6 +394,11 @@
}
+Object* StubCache::ComputeStoreNormal() {
+ return Builtins::builtin(Builtins::StoreIC_Normal);
+}
+
+
Object* StubCache::ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell) {
@@ -380,7 +408,7 @@
StoreStubCompiler compiler;
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -451,7 +479,9 @@
JSObject* holder,
JSFunction* function) {
// Compute the check type and the map.
- Map* map = IC::GetCodeCacheMapForObject(object);
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(object, holder);
+ Map* map = IC::GetCodeCacheMap(object, cache_holder);
// Compute check type based on receiver/holder.
StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
@@ -466,6 +496,7 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
+ cache_holder,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
@@ -476,7 +507,7 @@
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(argc, in_loop, kind);
+ CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
@@ -497,7 +528,9 @@
JSObject* holder,
int index) {
// Compute the check type and the map.
- Map* map = IC::GetCodeCacheMapForObject(object);
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(object, holder);
+ Map* map = IC::GetCodeCacheMap(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -508,11 +541,12 @@
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
+ cache_holder,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, in_loop, kind);
+ CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallField(JSObject::cast(object),
holder,
index,
@@ -534,8 +568,9 @@
Object* object,
JSObject* holder) {
// Compute the check type and the map.
- // If the object is a value, we use the prototype map for the cache.
- Map* map = IC::GetCodeCacheMapForObject(object);
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(object, holder);
+ Map* map = IC::GetCodeCacheMap(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -547,11 +582,12 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
+ cache_holder,
NOT_IN_LOOP,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, NOT_IN_LOOP, kind);
+ CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
holder,
name);
@@ -585,25 +621,29 @@
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function) {
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(receiver, holder);
+ Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
NORMAL,
+ cache_holder,
in_loop,
argc);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
- CallStubCompiler compiler(argc, in_loop, kind);
+ CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -1203,6 +1243,17 @@
}
+CallStubCompiler::CallStubCompiler(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ InlineCacheHolderFlag cache_holder)
+ : arguments_(argc),
+ in_loop_(in_loop),
+ kind_(kind),
+ cache_holder_(cache_holder) {
+}
+
+
Object* CallStubCompiler::CompileCustomCall(int generator_id,
Object* object,
JSObject* holder,
@@ -1230,6 +1281,7 @@
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
+ cache_holder_,
in_loop_,
argc);
return GetCodeWithFlags(flags, name);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index fcfffcf..856904a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -77,7 +77,7 @@
JSObject* receiver,
JSObject* holder);
- static Object* ComputeLoadNormal(String* name, JSObject* receiver);
+ static Object* ComputeLoadNormal();
static Object* ComputeLoadGlobal(String* name,
@@ -121,6 +121,8 @@
int field_index,
Map* transition = NULL);
+ static Object* ComputeStoreNormal();
+
static Object* ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell);
@@ -407,8 +409,21 @@
static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
- // Check the integrity of the prototype chain to make sure that the
- // current IC is still valid.
+ // Generates code that verifies that the property holder has not changed
+ // (checking maps of objects in the prototype chain for fast and global
+ // objects, doing a negative lookup for slow objects, and ensuring that the
+ // property cells for global objects are still empty) and checks that the map
+ // of the holder has not changed. If necessary the function also generates
+ // code for security check in case of global object holders. Helps to make
+ // sure that the current IC is still valid.
+ //
+ // The scratch and holder registers are always clobbered, but the object
+ // register is only clobbered if it is the same as the holder register. The
+ // function returns a register containing the holder - either object_reg or
+ // holder_reg.
+ // The function can optionally (when save_at_depth !=
+ // kInvalidProtoDepth) save the object at the given depth by moving
+ // it to [esp + kPointerSize].
Register CheckPrototypes(JSObject* object,
Register object_reg,
@@ -416,9 +431,10 @@
Register holder_reg,
Register scratch,
String* name,
- Label* miss) {
+ Label* miss,
+ Register extra = no_reg) {
return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
- name, kInvalidProtoDepth, miss);
+ name, kInvalidProtoDepth, miss, extra);
}
Register CheckPrototypes(JSObject* object,
@@ -428,7 +444,8 @@
Register scratch,
String* name,
int save_at_depth,
- Label* miss);
+ Label* miss,
+ Register extra = no_reg);
protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
@@ -611,8 +628,10 @@
kNumCallGenerators
};
- CallStubCompiler(int argc, InLoopFlag in_loop, Code::Kind kind)
- : arguments_(argc), in_loop_(in_loop), kind_(kind) { }
+ CallStubCompiler(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ InlineCacheHolderFlag cache_holder);
Object* CompileCallField(JSObject* object,
JSObject* holder,
@@ -653,6 +672,7 @@
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
+ const InlineCacheHolderFlag cache_holder_;
const ParameterCount& arguments() { return arguments_; }
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 93fecd1..509de3d 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -153,6 +153,8 @@
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(store_normal_miss, V8.StoreNormalMiss) \
+ SC(store_normal_hit, V8.StoreNormalHit) \
SC(call_miss, V8.CallMiss) \
SC(keyed_call_miss, V8.KeyedCallMiss) \
SC(load_miss, V8.LoadMiss) \
@@ -166,6 +168,8 @@
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
+ SC(negative_lookups, V8.NegativeLookups) \
+ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
diff --git a/src/v8dll-main.cc b/src/v8dll-main.cc
new file mode 100644
index 0000000..3d4b3a3
--- /dev/null
+++ b/src/v8dll-main.cc
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <windows.h>
+
+#include "../include/v8.h"
+
+extern "C" {
+BOOL WINAPI DllMain(HANDLE hinstDLL,
+ DWORD dwReason,
+ LPVOID lpvReserved) {
+ // Do nothing.
+ return TRUE;
+}
+}
diff --git a/src/v8natives.js b/src/v8natives.js
index 24d5e7c..487faab 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -745,6 +745,27 @@
}
+// ES5 section 15.2.3.10
+function ObjectPreventExtension(obj) {
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
+ !IS_UNDETECTABLE(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+ }
+ %PreventExtensions(obj);
+ return obj;
+}
+
+
+// ES5 section 15.2.3.13
+function ObjectIsExtensible(obj) {
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
+ !IS_UNDETECTABLE(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+ }
+ return %IsExtensible(obj);
+}
+
+
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -780,7 +801,9 @@
"defineProperties", ObjectDefineProperties,
"getPrototypeOf", ObjectGetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
- "getOwnPropertyNames", ObjectGetOwnPropertyNames
+ "getOwnPropertyNames", ObjectGetOwnPropertyNames,
+ "isExtensible", ObjectIsExtensible,
+ "preventExtensions", ObjectPreventExtension
));
}
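The JavaScript wrappers above only validate the receiver and defer to the two runtime
entries added in runtime.cc. For reference, a standalone sketch (plain C++, not V8 code)
of the object-level contract they implement: once extensions are prevented, adding a new
property fails, while existing properties remain writable:

#include <cassert>
#include <map>
#include <string>

class SimpleObject {
 public:
  SimpleObject() : extensible_(true) {}
  bool IsExtensible() const { return extensible_; }
  void PreventExtensions() { extensible_ = false; }
  // Returns false when a new property is rejected on a non-extensible object.
  bool Set(const std::string& name, int value) {
    if (!extensible_ && properties_.find(name) == properties_.end())
      return false;
    properties_[name] = value;
    return true;
  }
 private:
  std::map<std::string, int> properties_;
  bool extensible_;
};

int main() {
  SimpleObject o;
  assert(o.Set("x", 1));   // adding while extensible succeeds
  o.PreventExtensions();
  assert(!o.IsExtensible());
  assert(o.Set("x", 2));   // an existing property can still be updated
  assert(!o.Set("y", 3));  // a new property is rejected
  return 0;
}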
diff --git a/src/version.cc b/src/version.cc
index 68c2e73..db604e0 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 21
+#define BUILD_NUMBER 22
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index ff655c7..a38ebaf 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1238,10 +1238,6 @@
__ movq(rbx, r8);
#endif // _WIN64
- // Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
-
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a6d31be..3b1aeae 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -592,7 +592,6 @@
&& (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
&& (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
&& (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
&& (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif
@@ -1600,11 +1599,133 @@
}
+void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
+ // A fast smi loop is a for loop with an initializer
+ // that is a simple assignment of a smi to a stack variable,
+ // a test that is a simple test of that variable against a smi constant,
+ // and a step that is an increment/decrement of the variable, where
+ // the variable isn't modified in the loop body.
+ // This guarantees that the variable is always a smi.
+
+ Variable* loop_var = node->loop_variable();
+ Smi* initial_value = *Handle<Smi>::cast(node->init()
+ ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
+ Smi* limit_value = *Handle<Smi>::cast(
+ node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
+ Token::Value compare_op =
+ node->cond()->AsCompareOperation()->op();
+ bool increments =
+ node->next()->StatementAsCountOperation()->op() == Token::INC;
+
+ // Check that the condition isn't initially false.
+ bool initially_false = false;
+ int initial_int_value = initial_value->value();
+ int limit_int_value = limit_value->value();
+ switch (compare_op) {
+ case Token::LT:
+ initially_false = initial_int_value >= limit_int_value;
+ break;
+ case Token::LTE:
+ initially_false = initial_int_value > limit_int_value;
+ break;
+ case Token::GT:
+ initially_false = initial_int_value <= limit_int_value;
+ break;
+ case Token::GTE:
+ initially_false = initial_int_value < limit_int_value;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (initially_false) return;
+
+ // Only check loop condition at the end.
+
+ Visit(node->init());
+
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ // Set type and stack height of BreakTargets.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ IncrementLoopNesting();
+ loop.Bind();
+
+ // Set number type of the loop variable to smi.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+
+ SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
+ Visit(node->body());
+
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ if (has_valid_frame()) {
+ CodeForStatementPosition(node);
+ Slot* loop_var_slot = loop_var->slot();
+ if (loop_var_slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(loop_var_slot->index());
+ } else {
+ ASSERT(loop_var_slot->type() == Slot::PARAMETER);
+ frame_->PushParameterAt(loop_var_slot->index());
+ }
+ Result loop_var_result = frame_->Pop();
+ if (!loop_var_result.is_register()) {
+ loop_var_result.ToRegister();
+ }
+
+ if (increments) {
+ __ SmiAddConstant(loop_var_result.reg(),
+ loop_var_result.reg(),
+ Smi::FromInt(1));
+ } else {
+ __ SmiSubConstant(loop_var_result.reg(),
+ loop_var_result.reg(),
+ Smi::FromInt(1));
+ }
+
+ {
+ __ SmiCompare(loop_var_result.reg(), limit_value);
+ Condition condition;
+ switch (compare_op) {
+ case Token::LT:
+ condition = less;
+ break;
+ case Token::LTE:
+ condition = less_equal;
+ break;
+ case Token::GT:
+ condition = greater;
+ break;
+ case Token::GTE:
+ condition = greater_equal;
+ break;
+ default:
+ condition = never;
+ UNREACHABLE();
+ }
+ loop.Branch(condition);
+ }
+ loop_var_result.Unuse();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ DecrementLoopNesting();
+}
+
+
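The transformation the new code path performs is easiest to see on a plain loop. A
standalone illustration (hand-written C++, not generated code): when the initializer and
the limit are both smi literals and the condition is statically known to hold on entry,
the loop only needs a single bottom test, with the loop variable treated as a smi
throughout:

#include <cstdio>

int main() {
  const int initial = 0, limit = 5;   // both smi literals in the source
  if (!(initial < limit)) return 0;   // the "initially_false" check above
  int i = initial;
  do {                                // body first, condition at the bottom
    std::printf("%d\n", i);
    ++i;                              // the loop's increment step
  } while (i < limit);
  return 0;
}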
void CodeGenerator::VisitForStatement(ForStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
+ if (node->is_fast_smi_loop()) {
+ GenerateFastSmiLoop(node);
+ return;
+ }
+
// Compile the init expression if present.
if (node->init() != NULL) {
Visit(node->init());
@@ -1694,16 +1815,6 @@
CheckStack(); // TODO(1222600): ignore if body contains calls.
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- if (node->is_fast_smi_loop()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
- }
-
Visit(node->body());
// If there is an update expression, compile it if necessary.
@@ -1723,13 +1834,6 @@
}
}
- // Set the type of the loop variable to smi before compiling the test
- // expression if we are in a fast smi loop condition.
- if (node->is_fast_smi_loop() && has_valid_frame()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
- }
-
// Based on the condition analysis, compile the backward jump as
// necessary.
switch (info) {
@@ -3501,17 +3605,16 @@
__ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
}
if (is_increment) {
- __ SmiAddConstant(kScratchRegister,
+ __ SmiAddConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
} else {
- __ SmiSubConstant(kScratchRegister,
+ __ SmiSubConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
}
- __ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
// Postfix count operations return their input converted to
@@ -8622,26 +8725,26 @@
__ bind(&seq_ascii_string);
// rax: subject string (sequential ascii)
// rcx: RegExp data (FixedArray)
- __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ Set(rdi, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
// rax: subject string (flat two-byte)
// rcx: RegExp data (FixedArray)
- __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ Set(rdi, 0); // Type is two byte.
__ bind(&check_code);
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// the hole.
- __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// rax: subject string
// rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r12: code
+ // r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
@@ -8649,7 +8752,7 @@
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r12: code
+ // r11: code
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
@@ -8699,7 +8802,7 @@
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r12: code
+ // r11: code
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -8723,8 +8826,8 @@
__ movq(arg1, rax);
// Locate the code entry and call it.
- __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r12, kRegExpExecuteArguments);
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
// rsi is caller save, as it is used to pass parameter.
__ pop(rsi);
@@ -8938,7 +9041,7 @@
void CompareStub::Generate(MacroAssembler* masm) {
- Label call_builtin, done;
+ Label check_unequal_objects, done;
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -8975,14 +9078,14 @@
// If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map());
- if (cc_ == equal) {
- __ j(equal, &heap_number);
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- // Identical objects must still be converted to primitive for < and >.
- __ j(not_equal, ¬_identical);
+ __ j(equal, &heap_number);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, ¬_identical);
}
+ __ Set(rax, EQUAL);
+ __ ret(0);
__ bind(&heap_number);
// It is a heap number, so return equal if it's not NaN.
@@ -9113,7 +9216,8 @@
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9128,7 +9232,40 @@
__ Abort("Unexpected fall-through from string comparison");
#endif
- __ bind(&call_builtin);
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, ¬_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, ¬_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, ¬_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in rax,
+ // or return equal if we fell through to here.
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ bind(¬_both_objects);
+ }
+
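A standalone check (plain C++, not V8 code) of the tagging trick used in the block
above: heap object pointers carry a low tag bit of 1 and smis a low tag bit of 0
(kSmiTag == 0 and kSmiTagMask == 1 as asserted), so the sum of two tagged words has its
low bit set exactly when one of them is a smi. The smi encoding below assumes the x64
scheme with the payload in the upper 32 bits:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kHeapObjectTag = 1;
  uint64_t heap_a = 0x1000 | kHeapObjectTag;  // two tagged heap pointers
  uint64_t heap_b = 0x2000 | kHeapObjectTag;
  uint64_t smi = uint64_t{42} << 32;          // tagged smi: low bit clear
  assert(((heap_a + heap_b) & 1) == 0);       // both heap objects: bit clear
  assert(((heap_a + smi) & 1) == 1);          // exactly one smi: bit set
  assert(((smi + smi) & 1) == 0);             // both smis: bit clear (the stub
                                              // never reaches this case after
                                              // the smi fast path)
  return 0;
}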
// must swap argument order
__ pop(rcx);
__ pop(rdx);
@@ -9488,7 +9625,7 @@
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
@@ -9529,7 +9666,7 @@
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
@@ -9545,7 +9682,7 @@
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
+ __ movq(rsi, r12); // argv.
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
@@ -9747,7 +9884,7 @@
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
+ // r12: argv pointer (C callee-saved).
Label throw_normal_exception;
Label throw_termination_exception;
@@ -9807,24 +9944,38 @@
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ Push(Smi::FromInt(marker)); // context slot
- __ Push(Smi::FromInt(marker)); // function slot
- // Save callee-saved registers (X64 calling conventions).
+ // Scratch register is neither callee-save nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
__ push(r12);
__ push(r13);
__ push(r14);
__ push(r15);
- __ push(rdi);
- __ push(rsi);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
__ push(rbx);
- // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
- // callee-save in JS code as well.
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee save as well.
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
__ load_rax(c_entry_fp);
__ push(rax);
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
@@ -9895,8 +10046,11 @@
// Restore callee-saved registers (X64 conventions).
__ pop(rbx);
+#ifdef _WIN64
+ // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
__ pop(rsi);
__ pop(rdi);
+#endif
__ pop(r15);
__ pop(r14);
__ pop(r13);
@@ -11130,7 +11284,7 @@
// Check that both strings are non-external ascii strings.
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
+ &string_add_runtime);
// Get the two characters forming the sub string.
__ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -11140,7 +11294,7 @@
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
+ masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@@ -11232,7 +11386,7 @@
__ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
@@ -11269,7 +11423,7 @@
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index cd03d2a..b9a3b70 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -393,6 +393,9 @@
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
+ // Generate code for a fast smi loop.
+ void GenerateFastSmiLoop(ForStatement* node);
+
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a92b248..9991981 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -56,7 +56,11 @@
class EntryFrameConstants : public AllStatic {
public:
+#ifdef _WIN64
static const int kCallerFPOffset = -10 * kPointerSize;
+#else
+ static const int kCallerFPOffset = -8 * kPointerSize;
+#endif
static const int kArgvOffset = 6 * kPointerSize;
};
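The Win64/non-Win64 split above matches the entry-stub change in codegen-x64.cc: below
the saved frame pointer the stub pushes two stack-frame markers, r12-r15, rbx and the
saved c_entry_fp (8 pointer slots), plus rdi and rsi only under the Win64 ABI (10
slots), which is presumably where the -8 / -10 * kPointerSize caller-fp offsets come
from.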
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 31a806a..d04a7dc 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -61,11 +61,11 @@
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
-static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// r0: used to hold receiver instance type.
@@ -98,34 +98,17 @@
}
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // r0 - used to hold the capacity of the property dictionary.
- //
- // r1 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
-
- Label done;
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found, leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@@ -157,14 +140,58 @@
__ cmpq(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
- __ j(equal, &done);
+ __ j(equal, done);
} else {
- __ j(not_equal, miss_label);
+ __ j(not_equal, miss);
}
}
+}
- // Check that the value is a normal property.
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if name is not a symbol, and will jump to
+// the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // r0 - used to hold the capacity of the property dictionary.
+ //
+ // r1 - used to hold the index into the property dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
+
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r1 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
__ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ Test(Operand(elements, r1, times_pointer_size,
kDetailsOffset - kHeapObjectTag),
@@ -179,6 +206,75 @@
}
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not a symbol, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch0,
+ Register scratch1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // scratch0 - used for index into the property dictionary and is clobbered.
+ //
+ // scratch1 - used to hold the capacity of the property dictionary and is
+ // clobbered.
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ scratch0,
+ scratch1);
+
+ // If probing finds an entry in the dictionary, scratch1 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ Test(Operand(elements,
+ scratch1,
+ times_pointer_size,
+ kDetailsOffset - kHeapObjectTag),
+ Smi::FromInt(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(scratch1, Operand(elements,
+ scratch1,
+ times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movq(Operand(scratch1, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ movq(scratch0, value);
+ __ RecordWrite(elements, scratch1, scratch0);
+}
+
+
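GenerateStringDictionaryProbes is shared by the load helper and the new store helper
above. A rough standalone sketch of the probing scheme it implements (plain C++ with a
simplified linear step, not the real StringDictionary layout): hash the name, walk a
bounded number of slots under a power-of-two capacity mask, report the slot index on a
hit, and fall through to the miss handler if the last probe still does not match:

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// Returns the matching slot index, or -1 to signal the miss path.
int Probe(const std::vector<std::string>& slots, const std::string& name,
          int max_probes) {
  const uint32_t mask = static_cast<uint32_t>(slots.size()) - 1;  // power of 2
  const uint32_t hash = static_cast<uint32_t>(std::hash<std::string>()(name));
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + static_cast<uint32_t>(i)) & mask;
    if (slots[index] == name) return static_cast<int>(index);
  }
  return -1;  // not found within the inlined probes: take the miss label
}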
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
@@ -1332,7 +1428,7 @@
// Get the receiver of the function from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- GenerateDictionaryLoadReceiverCheck(masm, rdx, rax, rbx, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
// rax: elements
// Search the dictionary placing the result in rdi.
@@ -1616,7 +1712,7 @@
// -----------------------------------
Label miss;
- GenerateDictionaryLoadReceiverCheck(masm, rax, rdx, rbx, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
// rdx: elements
// Search the dictionary placing the result in rax.
@@ -1760,6 +1856,28 @@
}
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Label miss, restore_miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
+
+ GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
+ __ IncrementCounter(&Counters::store_normal_hit, 1);
+ __ ret(0);
+
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::store_normal_miss, 1);
+ GenerateMiss(masm);
+}
+
+
#undef __
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 3b2c789..76200d7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -105,12 +105,6 @@
}
-// For page containing |object| mark region covering [object+offset] dirty.
-// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the index register contains the array index into
-// the elements array represented a zero extended int32. Otherwise it can be
-// used as a scratch register.
-// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@@ -141,6 +135,35 @@
}
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are rsi.
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+ JumpIfSmi(value, &done);
+
+ InNewSpace(object, value, equal, &done);
+
+ RecordWriteHelper(object, address, value);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
+}
+
+
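The new three-register RecordWrite applies the same cheap filters as the offset-based
variant before doing any real work. A standalone sketch (plain C++, not V8 code) of
those two filters, assuming a hypothetical young-generation address range: smi stores
never need a barrier, and stores into objects that are themselves in new space do not
need to be remembered either:

#include <cstdint>

struct Heap {
  uintptr_t young_start, young_end;  // hypothetical new-space bounds
  bool InNewSpace(uintptr_t p) const {
    return p >= young_start && p < young_end;
  }
};

inline bool NeedsWriteBarrier(const Heap& heap, uintptr_t object,
                              uintptr_t value) {
  if ((value & 1) == 0) return false;         // value is a smi: no barrier
  if (heap.InNewSpace(object)) return false;  // store into new space: skip
  return true;                                // otherwise record the slot
}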
void MacroAssembler::RecordWriteNonSmi(Register object,
int offset,
Register scratch,
@@ -444,7 +467,7 @@
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
- xor_(dst, dst);
+ xorl(dst, dst);
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
@@ -454,7 +477,6 @@
}
}
-
void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -469,6 +491,78 @@
static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+Register MacroAssembler::GetSmiConstant(Smi* source) {
+ int value = source->value();
+ if (value == 0) {
+ xorl(kScratchRegister, kScratchRegister);
+ return kScratchRegister;
+ }
+ if (value == 1) {
+ return kSmiConstantRegister;
+ }
+ LoadSmiConstant(kScratchRegister, source);
+ return kScratchRegister;
+}
+
+void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+ if (FLAG_debug_code) {
+ movq(dst,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+ cmpq(dst, kSmiConstantRegister);
+ if (allow_stub_calls()) {
+ Assert(equal, "Uninitialized kSmiConstantRegister");
+ } else {
+ Label ok;
+ j(equal, &ok);
+ int3();
+ bind(&ok);
+ }
+ }
+ if (source->value() == 0) {
+ xorl(dst, dst);
+ return;
+ }
+ int value = source->value();
+ bool negative = value < 0;
+ unsigned int uvalue = negative ? -value : value;
+
+ switch (uvalue) {
+ case 9:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ break;
+ case 8:
+ xorl(dst, dst);
+ lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ break;
+ case 4:
+ xorl(dst, dst);
+ lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ break;
+ case 5:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ break;
+ case 3:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ break;
+ case 2:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ break;
+ case 1:
+ movq(dst, kSmiConstantRegister);
+ break;
+ case 0:
+ UNREACHABLE();
+ return;
+ default:
+ movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+ return;
+ }
+ if (negative) {
+ neg(dst);
+ }
+}
+
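The point of LoadSmiConstant above is to avoid a 10-byte movq of a 64-bit immediate for
the most common small constants. With kSmiConstantRegister permanently holding the smi 1
(the case 1 branch simply copies it; see InitializeSmiConstantRegister in the entry
stub), a single lea with base, index and a scale of 1/2/4/8 can synthesize the smi
values 2, 3, 4, 5, 8 and 9. A standalone check of the arithmetic the switch relies on
(plain C++, assuming the x64 smi encoding of value << 32):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t one = uint64_t{1} << 32;  // contents of kSmiConstantRegister
  assert(one + one * 1 == 2 * one);  // lea dst, [one + one*1]
  assert(one + one * 2 == 3 * one);  // lea dst, [one + one*2]
  assert(0 + one * 4 == 4 * one);    // xorl dst,dst; lea dst, [dst + one*4]
  assert(one + one * 4 == 5 * one);  // lea dst, [one + one*4]
  assert(0 + one * 8 == 8 * one);    // xorl dst,dst; lea dst, [dst + one*8]
  assert(one + one * 8 == 9 * one);  // lea dst, [one + one*8]
  return 0;
}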
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
@@ -629,9 +723,10 @@
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
+ // Make mask 0x8000000000000001 and test that both bits are zero.
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(0x03));
+ testb(kScratchRegister, Immediate(3));
return zero;
}
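A standalone check (plain C++, not V8 code) of the rotate trick in CheckPositiveSmi
above: rotating the word left by one moves the sign bit into bit 0 and the smi tag bit
into bit 1, so a single testb against 3 covers both "non-negative" and "tagged as a
smi". The smi values below assume the x64 encoding with the payload in the upper 32
bits:

#include <cassert>
#include <cstdint>

static uint64_t RotateLeft1(uint64_t v) { return (v << 1) | (v >> 63); }

int main() {
  uint64_t positive_smi = uint64_t{42} << 32;                        // sign 0, tag 0
  uint64_t negative_smi = static_cast<uint64_t>(int64_t{-42}) << 32; // sign 1, tag 0
  uint64_t heap_pointer = 0x12345678u | 1;                           // tag bit 1
  assert((RotateLeft1(positive_smi) & 3) == 0);  // passes the check
  assert((RotateLeft1(negative_smi) & 3) != 0);  // rejected: negative
  assert((RotateLeft1(heap_pointer) & 3) != 0);  // rejected: not a smi
  return 0;
}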
@@ -660,7 +755,6 @@
}
-
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
@@ -673,11 +767,10 @@
Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
- cmpq(kScratchRegister, Immediate(1));
- return equal;
+ ASSERT(!src.is(kScratchRegister));
+ // If we overflow by subtracting one, it's the minimal smi value.
+ cmpq(src, kSmiConstantRegister);
+ return overflow;
}
@@ -690,8 +783,8 @@
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
// An unsigned 32-bit integer value is valid as long as the high bit
// is not set.
- testq(src, Immediate(0x80000000));
- return zero;
+ testl(src, src);
+ return positive;
}
@@ -784,10 +877,10 @@
}
Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- subq(kScratchRegister, src2);
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
j(overflow, on_not_smi_result);
- movq(src1, kScratchRegister);
+ subq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
@@ -860,7 +953,7 @@
JumpIfNotSmi(src, on_not_smi_result);
Register tmp = (dst.is(src) ? kScratchRegister : dst);
- Move(tmp, constant);
+ LoadSmiConstant(tmp, constant);
addq(tmp, src);
j(overflow, on_not_smi_result);
if (dst.is(src)) {
@@ -874,14 +967,46 @@
if (!dst.is(src)) {
movq(dst, src);
}
+ return;
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ switch (constant->value()) {
+ case 1:
+ addq(dst, kSmiConstantRegister);
+ return;
+ case 2:
+ lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ return;
+ case 4:
+ lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ return;
+ case 8:
+ lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ return;
+ default:
+ Register constant_reg = GetSmiConstant(constant);
+ addq(dst, constant_reg);
+ return;
+ }
} else {
- Move(dst, constant);
- addq(dst, src);
+ switch (constant->value()) {
+ case 1:
+ lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ return;
+ case 2:
+ lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ return;
+ case 4:
+ lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ return;
+ case 8:
+ lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ return;
+ default:
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ return;
+ }
}
}
@@ -904,12 +1029,12 @@
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- addq(kScratchRegister, dst);
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
addq(dst, src);
j(overflow, on_not_smi_result);
}
@@ -923,19 +1048,17 @@
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ subq(dst, constant_reg);
} else {
- // Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
- Move(dst, Smi::FromInt(-constant->value()));
+ LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
addq(dst, src);
}
}
@@ -957,11 +1080,11 @@
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
- Move(kScratchRegister, constant);
+ LoadSmiConstant(kScratchRegister, constant);
subq(dst, kScratchRegister);
} else {
// Subtract by adding the negation.
- Move(kScratchRegister, Smi::FromInt(-constant->value()));
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
addq(kScratchRegister, dst);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
@@ -972,13 +1095,13 @@
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
- Move(dst, Smi::FromInt(-(constant->value())));
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
j(overflow, on_not_smi_result);
}
@@ -1132,10 +1255,10 @@
xor_(dst, dst);
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- and_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ and_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
and_(dst, src);
}
}
@@ -1152,10 +1275,10 @@
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- or_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ or_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
or_(dst, src);
}
}
@@ -1172,10 +1295,10 @@
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- xor_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ xor_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
xor_(dst, src);
}
}
@@ -1343,6 +1466,7 @@
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
+
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1568,8 +1692,8 @@
if (is_int32(smi)) {
push(Immediate(static_cast<int32_t>(smi)));
} else {
- Set(kScratchRegister, smi);
- push(kScratchRegister);
+ Register constant = GetSmiConstant(source);
+ push(constant);
}
}
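
The `is_int32()` fast path above pushes the tagged word as a sign-extended 32-bit immediate; everything else now goes through `GetSmiConstant`. Assuming the encoding used throughout this patch (integer value in the upper 32 bits), essentially only `Smi::FromInt(0)` survives the `is_int32` test, so the constant-register path is the common one. A rough sketch of that reasoning:

  #include <cstdint>

  // Assumption: x64 smis keep the integer value in the upper 32 bits.
  static inline uint64_t TagSmi(int32_t value) {
    return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
  }

  static inline bool IsInt32(uint64_t tagged) {
    int64_t x = static_cast<int64_t>(tagged);
    return x >= INT32_MIN && x <= INT32_MAX;
  }

  // IsInt32(TagSmi(0)) is true; IsInt32(TagSmi(n)) is false for any n != 0,
  // because |n| << 32 already lies outside the int32 range.
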
@@ -2109,10 +2233,10 @@
movq(rax, rsi);
store_rax(context_address);
- // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+ // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ lea(r12, Operand(rbp, r14, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
@@ -2158,7 +2282,7 @@
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Registers:
- // r15 : argv
+ // r12 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
@@ -2178,7 +2302,7 @@
// Pop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ lea(rsp, Operand(r12, 1 * kPointerSize));
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 44573f3..a256ab8 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -47,8 +47,11 @@
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
-static const Register kScratchRegister = { 10 }; // r10.
-static const Register kRootRegister = { 13 }; // r13
+static const Register kScratchRegister = { 10 }; // r10.
+static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
+static const Register kRootRegister = { 13 }; // r13 (callee save).
+// Value of smi in kSmiConstantRegister.
+static const int kSmiConstantRegisterValue = 1;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@@ -93,16 +96,27 @@
Condition cc,
Label* branch);
- // For page containing |object| mark region covering [object+offset] dirty.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as a Smi.
- // All registers are clobbered by the operation.
+ // For page containing |object| mark region covering [object+offset]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. If |offset| is zero, then the |scratch|
+ // register contains the array index into the elements array
+ // represented as a Smi. All registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update the
+ // write barrier if the value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
+ // For page containing |object| mark region covering [address]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. All registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update
+ // the write barrier if the value is a smi.
+ void RecordWrite(Register object,
+ Register address,
+ Register value);
+
// For page containing |object| mark region covering [object+offset] dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
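
Going by the declarations above, a call site for the offset-based form looks roughly like this (register choices and kSomeFieldOffset are made up for illustration; only the parameter order comes from the declaration):

  // Store rax into a field of the object held in rbx, then mark the covering
  // region dirty. The barrier update is skipped when rax holds a smi.
  __ movq(FieldOperand(rbx, kSomeFieldOffset), rax);
  __ RecordWrite(rbx, kSomeFieldOffset, rax, rcx);  // rcx used as scratch
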
@@ -191,6 +205,12 @@
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
+ void InitializeSmiConstantRegister() {
+ movq(kSmiConstantRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+ }
+
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
@@ -458,11 +478,12 @@
// Basic Smi operations.
void Move(Register dst, Smi* source) {
- Set(dst, reinterpret_cast<int64_t>(source));
+ LoadSmiConstant(dst, source);
}
void Move(const Operand& dst, Smi* source) {
- Set(dst, reinterpret_cast<int64_t>(source));
+ Register constant = GetSmiConstant(source);
+ movq(dst, constant);
}
void Push(Smi* smi);
@@ -809,6 +830,14 @@
private:
bool generating_stub_;
bool allow_stub_calls_;
+
+ // Returns a register holding the smi value. The register MUST NOT be
+ // modified. It may be the "smi 1 constant" register.
+ Register GetSmiConstant(Smi* value);
+
+ // Moves the smi value to the destination register.
+ void LoadSmiConstant(Register dst, Smi* value);
+
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
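
The declarations only promise that GetSmiConstant returns some register already holding the tagged value, and that the caller must not modify it. A plausible shape for the implementation, inferred from how the callers in the .cc hunks above use it (a sketch only, not the actual macro-assembler-x64.cc code):

  Register MacroAssembler::GetSmiConstant(Smi* value) {
    if (value->value() == kSmiConstantRegisterValue) {
      // The requested constant is the one permanently kept in r15.
      return kSmiConstantRegister;
    }
    // Otherwise materialize the constant in the scratch register and hand
    // that back; the caller has promised not to modify the returned register.
    LoadSmiConstant(kScratchRegister, value);
    return kScratchRegister;
  }
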
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index c7c18b3..c6bea3a 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -38,7 +38,8 @@
bool RegisterAllocator::IsReserved(Register reg) {
return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
- reg.is(kScratchRegister) || reg.is(kRootRegister);
+ reg.is(kScratchRegister) || reg.is(kRootRegister) ||
+ reg.is(kSmiConstantRegister);
}
@@ -58,11 +59,11 @@
5, // r8
6, // r9
-1, // r10 Scratch register.
- 9, // r11
- 10, // r12
+ 8, // r11
+ 9, // r12
-1, // r13 Roots array. This is callee saved.
7, // r14
- 8 // r15
+ -1 // r15 Smi constant register.
};
return kNumbers[reg.code()];
}
@@ -71,7 +72,7 @@
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
+ { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
return kRegisters[num];
}
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
index 8d666d2..a2884d9 100644
--- a/src/x64/register-allocator-x64.h
+++ b/src/x64/register-allocator-x64.h
@@ -33,7 +33,7 @@
class RegisterAllocatorConstants : public AllStatic {
public:
- static const int kNumRegisters = 11;
+ static const int kNumRegisters = 10;
static const int kInvalidRegister = -1;
};
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 1e103ac..ab75b96 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2125,7 +2125,8 @@
Register scratch,
String* name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ Register extra) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object,
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 0549e3c..adf47e2 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -388,6 +388,13 @@
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); }
+ // Duplicate the n'th element from the top of the frame.
+ // Dup(1) is equivalent to Dup().
+ void Dup(int n) {
+ ASSERT(n > 0);
+ PushFrameSlotAt(element_count() - n);
+ }
+
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();
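
Dup(n) generalizes Dup() by copying the n'th element below the top instead of the top itself; a typical use is to re-push a value (say, a receiver) that is still needed after other values have been pushed on top of it. A hypothetical call site:

  // With the virtual frame holding [... receiver, key] (key on top),
  // Dup(2) pushes another copy of the receiver: [... receiver, key, receiver].
  frame_->Dup(2);
  // Dup(1) remains equivalent to the existing Dup().
  frame_->Dup();
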