Update V8 to r5017 as required by WebKit r62496
Change-Id: I1b4b7718d1d77ceef07f543e9150a2cb3a628f3a
diff --git a/src/api.cc b/src/api.cc
index 464ca54..0f64dd4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2606,6 +2606,8 @@
return;
}
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
+ self->set_map(
+ *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
self->set_elements(*pixels);
}
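This hunk (and the matching one for external arrays just below) switches the object to the slow-elements map at the same moment its elements are replaced by external pixel data, so map-based fast-element checks in generated code no longer match such objects and stores go through the IC instead. A minimal usage sketch of the embedder call that reaches this code, assuming the 2010-era V8 public API (buffer and sizes are illustrative):

#include <v8.h>

// Hedged sketch: back a JS object with an externally owned pixel buffer.
// v8::Object::SetIndexedPropertiesToPixelData() is the public entry point
// behind the api.cc change above.
void AttachPixels() {
  v8::HandleScope scope;
  v8::Local<v8::Object> holder = v8::Object::New();
  static uint8_t pixels[256];                      // must outlive the object
  holder->SetIndexedPropertiesToPixelData(pixels, 256);
  // After the call the object's elements are a PixelArray and its map is the
  // slow-elements map, so inlined fast-element stores fall back to the IC.
}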
@@ -2659,6 +2661,8 @@
}
i::Handle<i::ExternalArray> array =
i::Factory::NewExternalArray(length, array_type, data);
+ self->set_map(
+ *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
self->set_elements(*array);
}
diff --git a/src/api.h b/src/api.h
index e7b1394..5c67136 100644
--- a/src/api.h
+++ b/src/api.h
@@ -134,16 +134,6 @@
};
-v8::Arguments::Arguments(v8::Local<v8::Value> data,
- v8::Local<v8::Object> holder,
- v8::Local<v8::Function> callee,
- bool is_construct_call,
- void** values, int length)
- : data_(data), holder_(holder), callee_(callee),
- is_construct_call_(is_construct_call),
- values_(values), length_(length) { }
-
-
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
};
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index f8d98db..f5ff43a 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1802,6 +1802,16 @@
void Assembler::vmov(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Dd = Dm
+ // Instruction details available in ARM DDI 0406B, A8-642.
+ emit(cond | 0xE*B24 | 0xB*B20 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond) {
@@ -2112,6 +2122,18 @@
}
+
+void Assembler::vsqrt(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
+}
+
+
// Pseudo instructions.
void Assembler::nop(int type) {
// This is mov rx, rx.
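The new vmov and vsqrt emitters above assemble the instruction word directly from the bit masks (B24, B23, ...) that follow the ARM DDI 0406B encodings. As a sanity check of the vsqrt encoding, a hedged host-side sketch that recomputes the word for vsqrt.f64 d0, d0 under the al condition, which matches the reference-manual encoding 0xEEB10BC0:

#include <cstdint>
#include <cstdio>

// Hedged sketch: rebuild the word Assembler::vsqrt() emits for
// "vsqrt.f64 d0, d0" with the 'al' condition, using the same shift
// positions as the B24/B23/B20/B16/B9/B8/B6 masks in the patch.
int main() {
  const uint32_t cond_al = 0xEu << 28;
  const uint32_t dst = 0, src = 0;  // d0, d0
  uint32_t instr = cond_al | (0xEu << 24) | (1u << 23) | (0x3u << 20) |
                   (1u << 16) | (dst << 12) | (0x5u << 9) | (1u << 8) |
                   (3u << 6) | src;
  std::printf("%08x\n", instr);  // prints eeb10bc0
  return 0;
}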
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 869227a..6a4fb23 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -930,6 +930,10 @@
const Register base,
int offset, // Offset must be a multiple of 4.
const Condition cond = al);
+
+ void vmov(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
@@ -988,6 +992,9 @@
const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
+ void vsqrt(const DwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
// Pseudo instructions
void nop(int type = 0);
@@ -1110,6 +1117,7 @@
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
}
+ bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
private:
// Code buffer:
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 8e87614..4d18727 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -157,6 +157,7 @@
state_(NULL),
loop_nesting_(0),
type_info_(NULL),
+ function_return_(JumpTarget::BIDIRECTIONAL),
function_return_is_shadowed_(false) {
}
@@ -218,7 +219,7 @@
// for stack overflow.
frame_->AllocateStackSlots();
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->AssertIsSpilled();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Allocate local context.
@@ -257,6 +258,7 @@
// order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside
// the function.
+ frame_->AssertIsSpilled();
for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i);
Slot* slot = par->slot();
@@ -282,8 +284,7 @@
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- frame_->EmitPush(ip);
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} else {
@@ -510,7 +511,6 @@
has_valid_frame() &&
!has_cc() &&
frame_->height() == original_height) {
- frame_->SpillAll();
true_target->Jump();
}
}
@@ -535,22 +535,18 @@
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
- VirtualFrame::SpilledScope scope(frame_);
JumpTarget loaded;
JumpTarget materialize_true;
materialize_true.Branch(cc_reg_);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
loaded.Jump();
materialize_true.Bind();
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
loaded.Bind();
cc_reg_ = al;
}
if (true_target.is_linked() || false_target.is_linked()) {
- VirtualFrame::SpilledScope scope(frame_);
// We have at least one condition value that has been "translated"
// into a branch, thus it needs to be loaded explicitly.
JumpTarget loaded;
@@ -561,8 +557,7 @@
// Load "true" if necessary.
if (true_target.is_linked()) {
true_target.Bind();
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
}
// If both "true" and "false" need to be loaded jump across the code for
// "false".
@@ -572,8 +567,7 @@
// Load "false" if necessary.
if (false_target.is_linked()) {
false_target.Bind();
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
}
// A value is loaded on all paths reaching this point.
loaded.Bind();
@@ -592,11 +586,11 @@
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ ldr(scratch,
- FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
- frame_->EmitPush(scratch);
+ Register reg = frame_->GetTOSRegister();
+ __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(reg,
+ FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
+ frame_->EmitPush(reg);
}
@@ -613,8 +607,6 @@
void CodeGenerator::StoreArgumentsObject(bool initial) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
ArgumentsAllocationMode mode = ArgumentsMode();
ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
@@ -623,9 +615,9 @@
// When using lazy arguments allocation, we store the hole value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- frame_->EmitPush(ip);
+ frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
} else {
+ frame_->SpillAll();
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address, and the
@@ -649,9 +641,9 @@
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- frame_->EmitPop(r0);
+ Register arguments = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(arguments, ip);
done.Branch(ne);
}
StoreToSlot(arguments->slot(), NOT_CONST_INIT);
@@ -754,40 +746,45 @@
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
- frame_->EmitPop(r0);
+ bool known_smi = frame_->KnownSmiAt(0);
+ Register tos = frame_->PopToRegister();
// Fast case checks
// Check if the value is 'false'.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- false_target->Branch(eq);
+ if (!known_smi) {
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(tos, ip);
+ false_target->Branch(eq);
- // Check if the value is 'true'.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- true_target->Branch(eq);
+ // Check if the value is 'true'.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(tos, ip);
+ true_target->Branch(eq);
- // Check if the value is 'undefined'.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- false_target->Branch(eq);
+ // Check if the value is 'undefined'.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(tos, ip);
+ false_target->Branch(eq);
+ }
// Check if the value is a smi.
- __ cmp(r0, Operand(Smi::FromInt(0)));
- false_target->Branch(eq);
- __ tst(r0, Operand(kSmiTagMask));
- true_target->Branch(eq);
+ __ cmp(tos, Operand(Smi::FromInt(0)));
- // Slow case: call the runtime.
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kToBool, 1);
- // Convert the result (r0) to a condition code.
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
+ if (!known_smi) {
+ false_target->Branch(eq);
+ __ tst(tos, Operand(kSmiTagMask));
+ true_target->Branch(eq);
+
+ // Slow case: call the runtime.
+ frame_->EmitPush(tos);
+ frame_->CallRuntime(Runtime::kToBool, 1);
+ // Convert the result (r0) to a condition code.
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r0, ip);
+ }
cc_reg_ = ne;
}
@@ -935,7 +932,15 @@
};
+
+// On entry the non-constant side of the binary operation is in tos_register_
+// and the constant smi side is nowhere. The tos_register_ is not used by the
+// virtual frame. On exit the answer is in the tos_register_ and the virtual
+// frame is unchanged.
void DeferredInlineSmiOperation::Generate() {
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
+
Register lhs = r1;
Register rhs = r0;
switch (op_) {
@@ -969,44 +974,19 @@
case Token::MOD:
case Token::BIT_OR:
case Token::BIT_XOR:
- case Token::BIT_AND: {
- if (reversed_) {
- if (tos_register_.is(r0)) {
- __ mov(r1, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r1));
- __ mov(r0, Operand(Smi::FromInt(value_)));
- lhs = r0;
- rhs = r1;
- }
- } else {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
- lhs = r0;
- rhs = r1;
- }
- }
- break;
- }
-
+ case Token::BIT_AND:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
- if (!reversed_) {
- if (tos_register_.is(r1)) {
- __ mov(r0, Operand(Smi::FromInt(value_)));
- } else {
- ASSERT(tos_register_.is(r0));
- __ mov(r1, Operand(Smi::FromInt(value_)));
+ if (tos_register_.is(r1)) {
+ __ mov(r0, Operand(Smi::FromInt(value_)));
+ } else {
+ ASSERT(tos_register_.is(r0));
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ }
+ if (reversed_ == tos_register_.is(r1)) {
lhs = r0;
rhs = r1;
- }
- } else {
- ASSERT(op_ == Token::SHL);
- __ mov(r1, Operand(Smi::FromInt(value_)));
}
break;
}
@@ -1019,11 +999,17 @@
GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
__ CallStub(&stub);
+
// The generic stub returns its value in r0, but that's not
// necessarily what we want. We want whatever the inlined code
// expected, which is that the answer is in the same register as
// the operand was.
__ Move(tos_register_, r0);
+
+ // The tos register was not in use for the virtual frame that we
+ // came into this function with, so we can merge back to that frame
+ // without trashing it.
+ copied_frame.MergeTo(frame_state()->frame());
}
@@ -1124,12 +1110,6 @@
// We move the top of stack to a register (normally no move is involved).
Register tos = frame_->PopToRegister();
- // All other registers are spilled. The deferred code expects one argument
- // in a register and all other values are flushed to the stack. The
- // answer is returned in the same register that the top of stack argument was
- // in.
- frame_->SpillAll();
-
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
@@ -1448,8 +1428,6 @@
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CallFunctionFlags flags,
int position) {
- frame_->AssertIsSpilled();
-
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
@@ -1482,7 +1460,6 @@
// stack, as receiver and arguments, and calls x.
// In the implementation comments, we call x the applicand
// and y the receiver.
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
@@ -1500,6 +1477,15 @@
Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ // At this point the top two stack elements are probably in registers
+ // since they were just loaded. Make sure they are in registers and fetch
+ // those registers.
+ Register receiver_reg = frame_->Peek2();
+ Register arguments_reg = frame_->Peek();
+
+ // From now on the frame is spilled.
+ frame_->SpillAll();
+
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
@@ -1513,32 +1499,30 @@
// already. If so, just use that instead of copying the arguments
// from the stack. This also deals with cases where a local variable
// named 'arguments' has been introduced.
- __ ldr(r0, MemOperand(sp, 0));
-
- Label slow, done;
+ JumpTarget slow;
+ Label done;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, r0);
- __ b(ne, &slow);
+ __ cmp(ip, arguments_reg);
+ slow.Branch(ne);
Label build_args;
// Get rid of the arguments object probe.
frame_->Drop();
// Stack now has 3 elements on it.
// Contents of stack at this point:
- // sp[0]: receiver
+ // sp[0]: receiver - in the receiver_reg register.
// sp[1]: applicand.apply
// sp[2]: applicand.
// Check that the receiver really is a JavaScript object.
- __ ldr(r0, MemOperand(sp, 0));
- __ BranchOnSmi(r0, &build_args);
+ __ BranchOnSmi(receiver_reg, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &build_args);
// Check that applicand.apply is Function.prototype.apply.
@@ -1627,7 +1611,7 @@
StoreArgumentsObject(false);
// Stack and frame now have 4 elements.
- __ bind(&slow);
+ slow.Bind();
// Generic computation of x.apply(y, args) with no special optimization.
// Flip applicand.apply and applicand on the stack, so
@@ -1652,7 +1636,6 @@
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
@@ -1661,7 +1644,7 @@
void CodeGenerator::CheckStack() {
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->SpillAll();
Comment cmnt(masm_, "[ check stack");
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
// Put the lr setup instruction in the delay slot. kInstrSize is added to
@@ -1683,7 +1666,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
Visit(statements->at(i));
}
@@ -1695,7 +1677,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
@@ -1713,7 +1694,6 @@
frame_->EmitPush(Operand(pairs));
frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// The result is discarded.
}
@@ -1754,7 +1734,6 @@
frame_->EmitPush(Operand(0));
}
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
@@ -1772,11 +1751,15 @@
val = node->fun(); // NULL if we don't have a function
}
+
if (val != NULL) {
+ WriteBarrierCharacter wb_info =
+ val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
+ if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
// Set initial value.
Reference target(this, node->proxy());
Load(val);
- target.SetValue(NOT_CONST_INIT);
+ target.SetValue(NOT_CONST_INIT, wb_info);
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -1899,7 +1882,6 @@
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node);
node->target()->continue_target()->Jump();
@@ -1907,7 +1889,6 @@
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node);
node->target()->break_target()->Jump();
@@ -1915,7 +1896,7 @@
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->SpillAll();
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
@@ -1926,7 +1907,7 @@
} else {
// Pop the result from the frame and prepare the frame for
// returning thus making it easier to merge.
- frame_->EmitPop(r0);
+ frame_->PopToR0();
frame_->PrepareForReturn();
if (function_return_.is_bound()) {
// If the function return label is already bound we reuse the
@@ -1986,7 +1967,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
Load(node->expression());
@@ -2012,7 +1992,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node);
// Pop context.
@@ -2027,7 +2006,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
@@ -2055,8 +2033,7 @@
next_test.Bind();
next_test.Unuse();
// Duplicate TOS.
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0);
+ frame_->Dup();
Comparison(eq, NULL, clause->label(), true);
Branch(false, &next_test);
@@ -2094,7 +2071,7 @@
default_entry.Bind();
VisitStatements(default_clause->statements());
// If control flow can fall out of the default and there is a case after
- // it, jup to that case's body.
+ // it, jump to that case's body.
if (frame_ != NULL && default_exit.is_bound()) {
default_exit.Jump();
}
@@ -2116,7 +2093,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
@@ -2191,7 +2167,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WhileStatement");
CodeForStatementPosition(node);
@@ -2209,7 +2184,7 @@
node->continue_target()->Bind();
if (info == DONT_KNOW) {
- JumpTarget body;
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
@@ -2242,7 +2217,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
if (node->init() != NULL) {
@@ -2521,13 +2495,13 @@
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
- each.SetValue(NOT_CONST_INIT);
+ each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
frame_->Drop(2);
} else {
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, r3 pushed above) is
// right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
+ each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
frame_->Drop();
}
}
@@ -2931,7 +2905,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
@@ -2972,10 +2945,8 @@
&done);
slow.Bind();
- VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp);
- __ mov(r0, Operand(slot->var()->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(slot->var()->name()));
if (typeof_state == INSIDE_TYPEOF) {
frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
@@ -2990,16 +2961,17 @@
Register scratch = VirtualFrame::scratch0();
TypeInfo info = type_info(slot);
frame_->EmitPush(SlotOperand(slot, scratch), info);
+
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
// value.
Comment cmnt(masm_, "[ Unhole const");
- frame_->EmitPop(scratch);
+ Register tos = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
- frame_->EmitPush(scratch);
+ __ cmp(tos, ip);
+ __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
+ frame_->EmitPush(tos);
}
}
}
@@ -3007,6 +2979,7 @@
void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
TypeofState state) {
+ VirtualFrame::RegisterAllocationScope scope(this);
LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation.
@@ -3015,17 +2988,15 @@
// ... or if the slot isn't a non-parameter arguments slot.
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
- VirtualFrame::SpilledScope spilled_scope(frame_);
-
- // Load the loaded value from the stack into r0 but leave it on the
+ // Load the loaded value from the stack into a register but leave it on the
// stack.
- __ ldr(r0, MemOperand(sp, 0));
+ Register tos = frame_->Peek();
// If the loaded value is the sentinel that indicates that we
// haven't loaded the arguments object yet, we need to do it now.
JumpTarget exit;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(tos, ip);
exit.Branch(ne);
frame_->Drop();
StoreArgumentsObject(false);
@@ -3035,14 +3006,13 @@
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
ASSERT(slot != NULL);
+ VirtualFrame::RegisterAllocationScope scope(this);
if (slot->type() == Slot::LOOKUP) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call.
frame_->EmitPush(cp);
- __ mov(r0, Operand(slot->var()->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(slot->var()->name()));
if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute
@@ -3071,7 +3041,7 @@
} else {
ASSERT(!slot->var()->is_dynamic());
Register scratch = VirtualFrame::scratch0();
- VirtualFrame::RegisterAllocationScope scope(this);
+ Register scratch2 = VirtualFrame::scratch1();
// The frame must be spilled when branching to this target.
JumpTarget exit;
@@ -3085,7 +3055,6 @@
__ ldr(scratch, SlotOperand(slot, scratch));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
- frame_->SpillAll();
exit.Branch(ne);
}
@@ -3104,18 +3073,18 @@
// Skip write barrier if the written value is a smi.
__ tst(tos, Operand(kSmiTagMask));
// We don't use tos any more after here.
- VirtualFrame::SpilledScope spilled_scope(frame_);
exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- // r1 could be identical with tos, but that doesn't matter.
- __ RecordWrite(scratch, Operand(offset), r3, r1);
+ // We need an extra register. Until we have a way to do that in the
+ // virtual frame we will cheat and ask for a free TOS register.
+ Register scratch3 = frame_->GetTOSRegister();
+ __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
// optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- frame_->SpillAll();
exit.Bind();
}
}
@@ -3289,42 +3258,51 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ RexExp Literal");
+ Register tmp = VirtualFrame::scratch0();
+ // Free up a TOS register that can be used to push the literal.
+ Register literal = frame_->GetTOSRegister();
+
// Retrieve the literal array and check the allocated entry.
// Load the function of this activation.
- __ ldr(r1, frame_->Function());
+ __ ldr(tmp, frame_->Function());
// Load the literals array of the function.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(r2, FieldMemOperand(r1, literal_offset));
+ __ ldr(literal, FieldMemOperand(tmp, literal_offset));
JumpTarget done;
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, ip);
+ __ cmp(literal, ip);
+ // This branch locks the virtual frame at the done label to match the
+ // one we have here, where the literal register is not on the stack and
+ // nothing is spilled.
done.Branch(ne);
- // If the entry is undefined we call the runtime system to computed
+ // If the entry is undefined we call the runtime system to compute
// the literal.
- frame_->EmitPush(r1); // literal array (0)
- __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
- frame_->EmitPush(r0); // literal index (1)
- __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
- frame_->EmitPush(r0);
- __ mov(r0, Operand(node->flags())); // RegExp flags (3)
- frame_->EmitPush(r0);
+ // literal array (0)
+ frame_->EmitPush(tmp);
+ // literal index (1)
+ frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
+ // RegExp pattern (2)
+ frame_->EmitPush(Operand(node->pattern()));
+ // RegExp flags (3)
+ frame_->EmitPush(Operand(node->flags()));
frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r2, Operand(r0));
+ __ Move(literal, r0);
+ // This call to bind will get us back to the virtual frame we had before
+ // where things are not spilled and the literal register is not on the stack.
done.Bind();
// Push the literal.
- frame_->EmitPush(r2);
+ frame_->EmitPush(literal);
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3333,20 +3311,20 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ObjectLiteral");
+ Register literal = frame_->GetTOSRegister();
// Load the function of this activation.
- __ ldr(r3, frame_->Function());
+ __ ldr(literal, frame_->Function());
// Literal array.
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
+ frame_->EmitPush(literal);
// Literal index.
- __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
+ frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
// Constant properties.
- __ mov(r1, Operand(node->constant_properties()));
+ frame_->EmitPush(Operand(node->constant_properties()));
// Should the object literal have fast elements?
- __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
- frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
+ frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
@@ -3369,37 +3347,33 @@
if (key->handle()->IsSymbol()) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Load(value);
- frame_->EmitPop(r0);
+ frame_->PopToR0();
+ // Fetch the object literal.
+ frame_->SpillAllButCopyTOSToR1();
__ mov(r2, Operand(key->handle()));
- __ ldr(r1, frame_->Top()); // Load the receiver.
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
break;
}
// else fall through
case ObjectLiteral::Property::PROTOTYPE: {
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0); // dup the result
+ frame_->Dup();
Load(key);
Load(value);
frame_->CallRuntime(Runtime::kSetProperty, 3);
break;
}
case ObjectLiteral::Property::SETTER: {
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0);
+ frame_->Dup();
Load(key);
- __ mov(r0, Operand(Smi::FromInt(1)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(1)));
Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
case ObjectLiteral::Property::GETTER: {
- __ ldr(r0, frame_->Top());
- frame_->EmitPush(r0);
+ frame_->Dup();
Load(key);
- __ mov(r0, Operand(Smi::FromInt(0)));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(Smi::FromInt(0)));
Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
@@ -3414,16 +3388,16 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ArrayLiteral");
+ Register tos = frame_->GetTOSRegister();
// Load the function of this activation.
- __ ldr(r2, frame_->Function());
+ __ ldr(tos, frame_->Function());
// Load the literals array of the function.
- __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
- __ mov(r0, Operand(node->constant_elements()));
- frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
+ frame_->EmitPush(tos);
+ frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
+ frame_->EmitPush(Operand(node->constant_elements()));
int length = node->values()->length();
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
@@ -3450,10 +3424,10 @@
// The property must be set by generated code.
Load(value);
- frame_->EmitPop(r0);
-
+ frame_->PopToR0();
// Fetch the object literal.
- __ ldr(r1, frame_->Top());
+ frame_->SpillAllButCopyTOSToR1();
+
// Get the elements array.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -3682,6 +3656,8 @@
// Evaluate the receiver subexpression.
Load(prop->obj());
+ WriteBarrierCharacter wb_info;
+
// Change to slow case in the beginning of an initialization block to
// avoid the quadratic behavior of repeatedly adding fast properties.
if (node->starts_initialization_block()) {
@@ -3703,7 +3679,7 @@
// [tos] : key
// [tos+1] : receiver
// [tos+2] : receiver if at the end of an initialization block
-
+ //
// Evaluate the right-hand side.
if (node->is_compound()) {
// For a compound assignment the right-hand side is a binary operation
@@ -3735,9 +3711,13 @@
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
inline_smi);
}
+ wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
} else {
// For non-compound assignment just load the right-hand side.
Load(node->value());
+ wb_info = node->value()->AsLiteral() != NULL ?
+ NEVER_NEWSPACE :
+ (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
}
// Stack layout:
@@ -3749,7 +3729,7 @@
// Perform the assignment. It is safe to ignore constants here.
ASSERT(node->op() != Token::INIT_CONST);
CodeForSourcePosition(node->position());
- EmitKeyedStore(prop->key()->type());
+ EmitKeyedStore(prop->key()->type(), wb_info);
frame_->EmitPush(r0);
// Stack layout:
@@ -3863,7 +3843,6 @@
// ------------------------------------------------------------------------
if (var != NULL && var->is_possibly_eval()) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript example: 'eval(arg)' // eval is not known to be shadowed
// ----------------------------------
@@ -3877,8 +3856,7 @@
Load(function);
// Allocate a frame slot for the receiver.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r2);
+ frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
// Load the arguments.
int arg_count = args->length();
@@ -3886,6 +3864,8 @@
Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
@@ -4315,22 +4295,205 @@
}
-// Generates the Math.pow method - currently just calls runtime.
+// Generates the Math.pow method.
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
- frame_->CallRuntime(Runtime::kMath_pow, 2);
- frame_->EmitPush(r0);
+
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ frame_->CallRuntime(Runtime::kMath_pow, 2);
+ frame_->EmitPush(r0);
+ } else {
+ CpuFeatures::Scope scope(VFP3);
+ JumpTarget runtime, done;
+ Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
+
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+
+ // Get base and exponent to registers.
+ Register exponent = frame_->PopToRegister();
+ Register base = frame_->PopToRegister(exponent);
+ Register heap_number_map = no_reg;
+
+ // Set the frame for the runtime jump target. The code below jumps to the
+ // jump target label so the frame needs to be established before that.
+ ASSERT(runtime.entry_frame() == NULL);
+ runtime.set_entry_frame(frame_);
+
+ __ BranchOnNotSmi(exponent, &exponent_nonsmi);
+ __ BranchOnNotSmi(base, &base_nonsmi);
+
+ heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Exponent is a smi and base is a smi. Get the smi value into vfp register
+ // d1.
+ __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
+ __ b(&powi);
+
+ __ bind(&base_nonsmi);
+ // Exponent is smi and base is non smi. Get the double value from the base
+ // into vfp register d1.
+ __ ObjectToDoubleVFPRegister(base, d1,
+ scratch1, scratch2, heap_number_map, s0,
+ runtime.entry_label());
+
+ __ bind(&powi);
+
+ // Load 1.0 into d0.
+ __ mov(scratch2, Operand(0x3ff00000));
+ __ mov(scratch1, Operand(0));
+ __ vmov(d0, scratch1, scratch2);
+
+ // Get the absolute untagged value of the exponent and use that for the
+ // calculation.
+ __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
+ __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi); // Negate if negative.
+ __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative.
+
+ // Run through all the bits in the exponent. The result is calculated in d0
+ // and d1 holds base^(2^i), where i is the index of the current bit.
+ Label more_bits;
+ __ bind(&more_bits);
+ __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
+ __ vmul(d0, d0, d1, cs); // Multiply with base^(bit^2) if bit is set.
+ __ vmul(d1, d1, d1, ne); // Don't bother calculating next d1 if done.
+ __ b(ne, &more_bits);
+
+ // If exponent is positive we are done.
+ __ cmp(exponent, Operand(0));
+ __ b(ge, &allocate_return);
+
+ // If exponent is negative result is 1/result (d2 already holds 1.0 in that
+ // case). However if d0 has reached infinity this will not provide the
+ // correct result, so call runtime if that is the case.
+ __ mov(scratch2, Operand(0x7FF00000));
+ __ mov(scratch1, Operand(0));
+ __ vmov(d1, scratch1, scratch2); // Load infinity into d1.
+ __ vcmp(d0, d1);
+ __ vmrs(pc);
+ runtime.Branch(eq); // d0 reached infinity.
+ __ vdiv(d0, d2, d0);
+ __ b(&allocate_return);
+
+ __ bind(&exponent_nonsmi);
+ // Special handling of raising to the power of -0.5 and 0.5. First check
+ // that the exponent is a heap number and that its lower mantissa bits are
+ // zero (they are zero for both values).
+ heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
+ __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
+ __ cmp(scratch1, heap_number_map);
+ runtime.Branch(ne);
+ __ tst(scratch2, scratch2);
+ runtime.Branch(ne);
+
+ // Load the higher bits (which contains the floating point exponent).
+ __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
+
+ // Compare exponent with -0.5.
+ __ cmp(scratch1, Operand(0xbfe00000));
+ __ b(ne, &not_minus_half);
+
+ // Get the double value from the base into vfp register d0.
+ __ ObjectToDoubleVFPRegister(base, d0,
+ scratch1, scratch2, heap_number_map, s0,
+ runtime.entry_label(),
+ AVOID_NANS_AND_INFINITIES);
+
+ // Load 1.0 into d2.
+ __ mov(scratch2, Operand(0x3ff00000));
+ __ mov(scratch1, Operand(0));
+ __ vmov(d2, scratch1, scratch2);
+
+ // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
+ __ vdiv(d0, d2, d0);
+ __ vsqrt(d0, d0);
+
+ __ b(&allocate_return);
+
+ __ bind(&not_minus_half);
+ // Compare exponent with 0.5.
+ __ cmp(scratch1, Operand(0x3fe00000));
+ runtime.Branch(ne);
+
+ // Get the double value from the base into vfp register d0.
+ __ ObjectToDoubleVFPRegister(base, d0,
+ scratch1, scratch2, heap_number_map, s0,
+ runtime.entry_label(),
+ AVOID_NANS_AND_INFINITIES);
+ __ vsqrt(d0, d0);
+
+ __ bind(&allocate_return);
+ Register scratch3 = r5;
+ __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
+ heap_number_map, runtime.entry_label());
+ __ mov(base, scratch3);
+ done.Jump();
+
+ runtime.Bind();
+
+ // Push back the arguments again for the runtime call.
+ frame_->EmitPush(base);
+ frame_->EmitPush(exponent);
+ frame_->CallRuntime(Runtime::kMath_pow, 2);
+ __ Move(base, r0);
+
+ done.Bind();
+ frame_->EmitPush(base);
+ }
}
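The smi-exponent fast path above is plain square-and-multiply: d1 is squared once per exponent bit while d0 accumulates the factors whose bits are set, and a negative exponent is folded in at the end as 1.0/result, with a fall-back to the runtime if the intermediate result reached infinity. A hedged C++ sketch of the same loop using ordinary doubles instead of VFP registers:

#include <cstdio>

// Hedged sketch of the square-and-multiply loop generated for smi exponents.
double PowiSketch(double base, int exponent) {
  unsigned bits = exponent < 0 ? -(unsigned)exponent : (unsigned)exponent;
  double result = 1.0;  // plays the role of d0
  double power = base;  // plays the role of d1, i.e. base^(2^i)
  while (bits != 0) {
    if (bits & 1) result *= power;  // multiply in base^(2^i) when bit i is set
    power *= power;                 // square for the next bit
    bits >>= 1;
  }
  // Negative exponent: invert at the end, as the generated code does with d2.
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  std::printf("%g %g\n", PowiSketch(2.0, 10), PowiSketch(2.0, -2));  // 1024 0.25
  return 0;
}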
-// Generates the Math.sqrt method - currently just calls runtime.
+// Generates the Math.sqrt method.
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- frame_->CallRuntime(Runtime::kMath_sqrt, 1);
- frame_->EmitPush(r0);
+
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ frame_->CallRuntime(Runtime::kMath_sqrt, 1);
+ frame_->EmitPush(r0);
+ } else {
+ CpuFeatures::Scope scope(VFP3);
+ JumpTarget runtime, done;
+
+ Register scratch1 = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+
+ // Get the value from the frame.
+ Register tos = frame_->PopToRegister();
+
+ // Set the frame for the runtime jump target. The code below jumps to the
+ // jump target label so the frame needs to be established before that.
+ ASSERT(runtime.entry_frame() == NULL);
+ runtime.set_entry_frame(frame_);
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Get the double value from the heap number into vfp register d0.
+ __ ObjectToDoubleVFPRegister(tos, d0,
+ scratch1, scratch2, heap_number_map, s0,
+ runtime.entry_label());
+
+ // Calculate the square root of d0 and place result in a heap number object.
+ __ vsqrt(d0, d0);
+ __ AllocateHeapNumberWithValue(
+ tos, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
+ done.Jump();
+
+ runtime.Bind();
+ // Push back the argument again for the runtime call.
+ frame_->EmitPush(tos);
+ frame_->CallRuntime(Runtime::kMath_sqrt, 1);
+ __ Move(tos, r0);
+
+ done.Bind();
+ frame_->EmitPush(tos);
+ }
}
@@ -5201,7 +5364,6 @@
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -5273,8 +5435,7 @@
break;
case Token::SUB: {
- VirtualFrame::SpilledScope spilled(frame_);
- frame_->EmitPop(r0);
+ frame_->PopToR0();
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
frame_->EmitPush(r0); // r0 has result
@@ -5282,23 +5443,28 @@
}
case Token::BIT_NOT: {
- // smi check
- VirtualFrame::SpilledScope spilled(frame_);
- frame_->EmitPop(r0);
- JumpTarget smi_label;
+ Register tos = frame_->PopToRegister();
+ JumpTarget not_smi_label;
JumpTarget continue_label;
- __ tst(r0, Operand(kSmiTagMask));
- smi_label.Branch(eq);
+ // Smi check.
+ __ tst(tos, Operand(kSmiTagMask));
+ not_smi_label.Branch(ne);
- GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
- frame_->CallStub(&stub, 0);
+ __ mvn(tos, Operand(tos));
+ __ bic(tos, tos, Operand(kSmiTagMask)); // Bit-clear inverted smi-tag.
+ frame_->EmitPush(tos);
+ // The fast case is the first to jump to the continue label, so it gets
+ // to decide the virtual frame layout.
continue_label.Jump();
- smi_label.Bind();
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
+ not_smi_label.Bind();
+ frame_->SpillAll();
+ __ Move(r0, tos);
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0);
+
continue_label.Bind();
- frame_->EmitPush(r0); // r0 has result
break;
}
@@ -5308,16 +5474,16 @@
break;
case Token::ADD: {
- VirtualFrame::SpilledScope spilled(frame_);
- frame_->EmitPop(r0);
+ Register tos = frame_->Peek();
// Smi check.
JumpTarget continue_label;
- __ tst(r0, Operand(kSmiTagMask));
+ __ tst(tos, Operand(kSmiTagMask));
continue_label.Branch(eq);
- frame_->EmitPush(r0);
+
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
+ frame_->EmitPush(r0);
+
continue_label.Bind();
- frame_->EmitPush(r0); // r0 has result
break;
}
default:
@@ -5335,6 +5501,7 @@
int original_height = frame_->height();
#endif
Comment cmnt(masm_, "[ CountOperation");
+ VirtualFrame::RegisterAllocationScope scope(this);
bool is_postfix = node->is_postfix();
bool is_increment = node->op() == Token::INC;
@@ -5358,7 +5525,7 @@
__ sub(value, value, Operand(Smi::FromInt(1)));
}
frame_->EmitPush(value);
- target.SetValue(NOT_CONST_INIT);
+ target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
if (is_postfix) frame_->Pop();
ASSERT_EQ(original_height + 1, frame_->height());
return;
@@ -5457,7 +5624,7 @@
// Set the target with the result, leaving the result on
// top of the stack. Removes the target from the stack if
// it has a non-zero size.
- if (!is_const) target.SetValue(NOT_CONST_INIT);
+ if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
}
// Postfix: Discard the new value and use the old.
@@ -5478,7 +5645,6 @@
// after evaluating the left hand side (due to the shortcut
// semantics), but the compiler must (statically) know if the result
// of compiling the binary operation is materialized or not.
- VirtualFrame::SpilledScope spilled_scope(frame_);
if (node->op() == Token::AND) {
JumpTarget is_true;
LoadCondition(node->left(), &is_true, false_target(), false);
@@ -5663,8 +5829,6 @@
if (left_is_null || right_is_null) {
Load(left_is_null ? right : left);
Register tos = frame_->PopToRegister();
- // JumpTargets can't cope with register allocation yet.
- frame_->SpillAll();
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos, ip);
@@ -5707,9 +5871,6 @@
LoadTypeofExpression(operation->expression());
Register tos = frame_->PopToRegister();
- // JumpTargets can't cope with register allocation yet.
- frame_->SpillAll();
-
Register scratch = VirtualFrame::scratch0();
if (check->Equals(Heap::number_symbol())) {
@@ -5830,7 +5991,6 @@
break;
case Token::IN: {
- VirtualFrame::SpilledScope scope(frame_);
Load(left);
Load(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
@@ -5839,7 +5999,6 @@
}
case Token::INSTANCEOF: {
- VirtualFrame::SpilledScope scope(frame_);
Load(left);
Load(right);
InstanceofStub stub;
@@ -5937,10 +6096,15 @@
};
+// Takes key and register in r0 and r1 or vice versa. Returns result
+// in r0.
void DeferredReferenceGetKeyedValue::Generate() {
ASSERT((key_.is(r0) && receiver_.is(r1)) ||
(key_.is(r1) && receiver_.is(r0)));
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
+
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
@@ -5961,6 +6125,13 @@
// keyed load has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
+ // Now go back to the frame that we entered with. This will not overwrite
+ // the receiver or key registers since they were not in use when we came
+ // in. The instructions emitted by this merge are skipped over by the
+ // inline load patching mechanism when looking for the branch instruction
+ // that tells it where the code to patch is.
+ copied_frame.MergeTo(frame_state()->frame());
+
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
@@ -6114,7 +6285,6 @@
bool key_is_known_smi = frame_->KnownSmiAt(0);
Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key);
- VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects key and receiver in registers.
DeferredReferenceGetKeyedValue* deferred =
@@ -6152,10 +6322,12 @@
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch2, ip);
- deferred->Branch(ne);
+ if (FLAG_debug_code) {
+ __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+ __ cmp(scratch2, ip);
+ __ Assert(eq, "JSObject with fast elements map has slow elements");
+ }
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
@@ -6176,7 +6348,7 @@
__ mov(r0, scratch1);
// Make sure that the expected number of instructions are generated.
- ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
+ ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
@@ -6185,7 +6357,8 @@
}
-void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+void CodeGenerator::EmitKeyedStore(StaticType* key_type,
+ WriteBarrierCharacter wb_info) {
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
@@ -6201,12 +6374,21 @@
__ IncrementCounter(&Counters::keyed_store_inline, 1,
scratch1, scratch2);
+
+
// Load the value, key and receiver from the stack.
+ bool value_is_harmless = frame_->KnownSmiAt(0);
+ if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
+ bool key_is_smi = frame_->KnownSmiAt(1);
Register value = frame_->PopToRegister();
Register key = frame_->PopToRegister(value);
+ VirtualFrame::SpilledScope spilled(frame_);
Register receiver = r2;
frame_->EmitPop(receiver);
- VirtualFrame::SpilledScope spilled(frame_);
+
+#ifdef DEBUG
+ bool we_remembered_the_write_barrier = value_is_harmless;
+#endif
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
@@ -6214,12 +6396,23 @@
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
- __ tst(value, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!value_is_harmless) {
+ // If the value is not likely to be a Smi then let's test the fixed array
+ // for new space instead. See below.
+ if (wb_info == LIKELY_SMI) {
+ __ tst(value, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+#ifdef DEBUG
+ we_remembered_the_write_barrier = true;
+#endif
+ }
+ }
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!key_is_smi) {
+ // Check that the key is a smi.
+ __ tst(key, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@@ -6235,24 +6428,35 @@
__ cmp(scratch1, key);
deferred->Branch(ls); // Unsigned less equal.
+ // Get the elements array from the receiver.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (!value_is_harmless && wb_info != LIKELY_SMI) {
+ Label ok;
+ __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
+ __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
+ __ tst(value, Operand(kSmiTagMask), ne);
+ deferred->Branch(ne);
+#ifdef DEBUG
+ we_remembered_the_write_barrier = true;
+#endif
+ }
+ // Check that the elements array is not a dictionary.
+ __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
// The following instructions are part of the inlined store keyed
// property code which can be patched. Therefore the exact number of
// instructions generated needs to be fixed, so the constant pool is blocked
// while generating this code.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+#ifdef DEBUG
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
+#endif
+
// Read the fixed array map from the constant pool (not from the root
// array) so that the value can be patched. When debugging, we patch this
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
-#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
-#endif
__ mov(scratch3, Operand(Factory::fixed_array_map()));
__ cmp(scratch2, scratch3);
deferred->Branch(ne);
@@ -6269,6 +6473,8 @@
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
+ ASSERT(we_remembered_the_write_barrier);
+
deferred->BindExit();
} else {
frame()->CallKeyedStoreIC();
@@ -6284,7 +6490,6 @@
#undef __
#define __ ACCESS_MASM(masm)
-
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
Property* property = expression_->AsProperty();
@@ -6367,7 +6572,7 @@
}
-void Reference::SetValue(InitState init_state) {
+void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
MacroAssembler* masm = cgen_->masm();
@@ -6399,7 +6604,7 @@
Property* property = expression_->AsProperty();
ASSERT(property != NULL);
cgen_->CodeForSourcePosition(property->position());
- cgen_->EmitKeyedStore(property->key()->type());
+ cgen_->EmitKeyedStore(property->key()->type(), wb_info);
frame->EmitPush(r0);
set_unloaded();
break;
@@ -6648,7 +6853,7 @@
__ bind(¬_special);
// Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
// Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(source_, mantissa, zeros_);
+ __ CountLeadingZeros(zeros_, source_, mantissa);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here. Use a fudge factor to
// divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
@@ -7073,22 +7278,42 @@
// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Label* possible_strings,
+ Label* not_both_strings) {
// r2 is object type of r0.
// Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ Label object_test;
ASSERT(kSymbolTag != 0);
+ __ tst(r2, Operand(kIsNotStringMask));
+ __ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask));
- __ b(eq, slow);
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ b(eq, possible_strings);
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, not_both_strings);
__ tst(r3, Operand(kIsSymbolMask));
- __ b(eq, slow);
+ __ b(eq, possible_strings);
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
__ mov(r0, Operand(1)); // Non-zero indicates not equal.
__ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&object_test);
+ __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, not_both_strings);
+ __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, not_both_strings);
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ and_(r0, r2, Operand(r3));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ mov(pc, Operand(lr)); // Return.
}
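The object_test tail above decides equality of two non-string, non-smi operands from their map bit fields alone: the result in r0 is zero (meaning equal) only when both maps have the undetectable bit set. A hedged sketch of that bit arithmetic, using an illustrative bit position in place of Map::kIsUndetectable:

#include <cstdio>

// Hedged sketch of the and/and/eor sequence at the end of
// EmitCheckForSymbolsOrObjects: zero means "equal", non-zero means "not equal".
unsigned CompareUndetectable(unsigned bitfield0, unsigned bitfield1,
                             unsigned undetectable_bit) {
  unsigned r = bitfield0 & bitfield1;  // and_(r0, r2, Operand(r3))
  r &= undetectable_bit;               // keep only the undetectable bit
  return r ^ undetectable_bit;         // 0 iff both objects were undetectable
}

int main() {
  const unsigned kBit = 1u << 4;  // illustrative value of 1 << Map::kIsUndetectable
  std::printf("%u %u\n",
              CompareUndetectable(kBit, kBit, kBit),  // 0: equal
              CompareUndetectable(kBit, 0, kBit));    // non-zero: not equal
  return 0;
}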
@@ -7204,7 +7429,8 @@
void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
+ __ add(offset_, object_, Operand(offset_));
+ __ RecordWriteHelper(object_, offset_, scratch_);
__ Ret();
}
@@ -7301,9 +7527,10 @@
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
if (cc_ == eq && !strict_) {
- // Either jumps to slow or returns the answer. Assumes that r2 is the type
- // of r0 on entry.
- EmitCheckForSymbols(masm, &flat_string_check);
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that r2 is the type of r0 on entry.
+ EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
@@ -7377,7 +7604,7 @@
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
- if (use_fp_registers) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(rhs, ASR, kSmiTagSize));
__ vmov(s15, r7);
@@ -7385,8 +7612,12 @@
__ mov(r7, Operand(lhs, ASR, kSmiTagSize));
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
+ }
} else {
- // Write Smi from rhs to r3 and r2 in double format. r3 is scratch.
+ // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub1(r3, r2, r7, r9);
__ push(lr);
@@ -7461,12 +7692,15 @@
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
- if (use_fp_registers) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
__ vcvt_f64_s32(d7, s15);
+ if (!use_fp_registers) {
+ __ vmov(r2, r3, d7);
+ }
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -7517,12 +7751,15 @@
__ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
- if (use_fp_registers) {
+ if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi in r1 to double in d6.
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
+ if (!use_fp_registers) {
+ __ vmov(r0, r1, d6);
+ }
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -7806,7 +8043,11 @@
// The code below for writing into heap numbers isn't capable of writing
// the register as an unsigned int so we go to slow case if we hit this
// case.
- __ b(mi, &slow);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ __ b(mi, &result_not_a_smi);
+ } else {
+ __ b(mi, &slow);
+ }
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
@@ -7850,10 +8091,24 @@
// result.
__ mov(r0, Operand(r5));
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r2);
+ if (op_ == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in r2 to the heap number in r0, using
+ // r3 as scratch. r0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+ __ TailCallStub(&stub);
+ }
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
@@ -7969,6 +8224,173 @@
}
+// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
+// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
+// Takes the sum of the digits base (mask + 1) repeatedly until we have a
+// number from 0 to mask. On exit the 'eq' condition flags are set if the
+// answer is exactly the mask.
+void IntegerModStub::DigitSum(MacroAssembler* masm,
+ Register lhs,
+ int mask,
+ int shift,
+ Label* entry) {
+ ASSERT(mask > 0);
+ ASSERT(mask <= 0xff); // Ensures the mask fits in an immediate operand, so ip is not needed to hold it.
+ Label loop;
+ __ bind(&loop);
+ __ and_(ip, lhs, Operand(mask));
+ __ add(lhs, ip, Operand(lhs, LSR, shift));
+ __ bind(entry);
+ __ cmp(lhs, Operand(mask));
+ __ b(gt, &loop);
+}
+
+
+void IntegerModStub::DigitSum(MacroAssembler* masm,
+ Register lhs,
+ Register scratch,
+ int mask,
+ int shift1,
+ int shift2,
+ Label* entry) {
+ ASSERT(mask > 0);
+ ASSERT(mask <= 0xff); // Ensures the mask fits in an immediate operand, so ip is not needed to hold it.
+ Label loop;
+ __ bind(&loop);
+ __ bic(scratch, lhs, Operand(mask));
+ __ and_(ip, lhs, Operand(mask));
+ __ add(lhs, ip, Operand(lhs, LSR, shift1));
+ __ add(lhs, lhs, Operand(scratch, LSR, shift2));
+ __ bind(entry);
+ __ cmp(lhs, Operand(mask));
+ __ b(gt, &loop);
+}
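The digit-sum reduction above can be sanity-checked with a small host-side C++ model (illustrative only; DigitSumModel is not part of the patch): summing the base-(mask + 1) digits of a number preserves its value modulo mask, so the loop ends with a value in [0, mask] that is congruent to the input.

#include <cassert>
#include <cstdint>

// Repeatedly sum the base-(mask + 1) digits until the value fits in [0, mask].
// Requires mask == (1 << shift) - 1; the result is congruent to n modulo mask.
static uint32_t DigitSumModel(uint32_t n, uint32_t mask, int shift) {
  while (n > mask) {
    n = (n & mask) + (n >> shift);
  }
  return n;
}

int main() {
  for (uint32_t n = 0; n < 100000; n++) {
    uint32_t r = DigitSumModel(n, 7, 3);  // 7 = 0b111: base-8 digit sums preserve n mod 7.
    if (r == 7) r = 0;                    // Same final correction the stub applies on 'eq'.
    assert(r == n % 7);
    assert(DigitSumModel(n, 0xf, 4) % 5 == n % 5);  // 5 divides 15 = 0b1111.
  }
  return 0;
}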
+
+
+// Splits the number into two halves (bottom half has shift bits). The top
+// half is subtracted from the bottom half. If the result is negative then
+// rhs is added.
+void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
+ Register lhs,
+ int shift,
+ int rhs) {
+ int mask = (1 << shift) - 1;
+ __ and_(ip, lhs, Operand(mask));
+ __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
+ __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
+}
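ModGetInRangeBySubtraction works because the denominators it is used for have the form 2^shift + 1 (5, 9 and 17 below), so 2^shift is congruent to -1 and the top half can simply be subtracted from the bottom half. A rough C++ check of that identity (names and test ranges are illustrative):

#include <cassert>
#include <cstdint>

// For rhs == (1 << shift) + 1, (lo - hi) is congruent to v modulo rhs; after a
// single conditional add of rhs the result lands in [0, rhs - 1].
static int32_t ModBySubtractionModel(int32_t v, int shift, int32_t rhs) {
  int32_t lo = v & ((1 << shift) - 1);
  int32_t hi = v >> shift;
  int32_t r = lo - hi;
  if (r < 0) r += rhs;
  return r;
}

int main() {
  // The ranges match what DigitSum guarantees before each call: <= 0xf for
  // mod 5, <= 0x3f for mod 9 and <= 0xff for mod 17.
  for (int32_t v = 0; v <= 0xf; v++) assert(ModBySubtractionModel(v, 2, 5) == v % 5);
  for (int32_t v = 0; v <= 0x3f; v++) assert(ModBySubtractionModel(v, 3, 9) == v % 9);
  for (int32_t v = 0; v <= 0xff; v++) assert(ModBySubtractionModel(v, 4, 17) == v % 17);
  return 0;
}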
+
+
+void IntegerModStub::ModReduce(MacroAssembler* masm,
+ Register lhs,
+ int max,
+ int denominator) {
+ int limit = denominator;
+ while (limit * 2 <= max) limit *= 2;
+ while (limit >= denominator) {
+ __ cmp(lhs, Operand(limit));
+ __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
+ limit >>= 1;
+ }
+}
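ModReduce brings a value that is already congruent to the answer into the 0 to denominator-1 range by conditionally subtracting denominator * 2^i for decreasing i, so only a handful of compare-and-subtract pairs are emitted. A C++ model of the same loop (illustrative):

#include <cassert>
#include <cstdint>

// Conditionally subtract decreasing power-of-two multiples of the denominator.
// Each subtraction preserves the value mod denominator and halves the bound.
static uint32_t ModReduceModel(uint32_t lhs, uint32_t max, uint32_t denominator) {
  uint32_t limit = denominator;
  while (limit * 2 <= max) limit *= 2;
  while (limit >= denominator) {
    if (lhs >= limit) lhs -= limit;
    limit >>= 1;
  }
  return lhs;
}

int main() {
  for (uint32_t n = 0; n <= 0xff; n++) {
    assert(ModReduceModel(n & 0x3f, 0x3f, 11) == (n & 0x3f) % 11);
    assert(ModReduceModel(n, 0xff, 13) == n % 13);
    assert(ModReduceModel(n, 0xff, 23) == n % 23);
  }
  return 0;
}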
+
+
+void IntegerModStub::ModAnswer(MacroAssembler* masm,
+ Register result,
+ Register shift_distance,
+ Register mask_bits,
+ Register sum_of_digits) {
+ __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
+ __ Ret();
+}
+
+
+// See comment for class.
+void IntegerModStub::Generate(MacroAssembler* masm) {
+ __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
+ __ bic(odd_number_, odd_number_, Operand(1));
+ __ mov(odd_number_, Operand(odd_number_, LSL, 1));
+ // We now have (odd_number_ - 1) * 2 in the register.
+ // Build a switch out of branches instead of data because it avoids
+ // having to teach the assembler about intra-code-object pointers
+ // that are not in relative branch instructions.
+ Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
+ Label mod21, mod23, mod25;
+ { Assembler::BlockConstPoolScope block_const_pool(masm);
+ __ add(pc, pc, Operand(odd_number_));
+ // When you read pc it is always 8 ahead, but when you write it you always
+ // write the actual value. So we put in two nops to take up the slack.
+ __ nop();
+ __ nop();
+ __ b(&mod3);
+ __ b(&mod5);
+ __ b(&mod7);
+ __ b(&mod9);
+ __ b(&mod11);
+ __ b(&mod13);
+ __ b(&mod15);
+ __ b(&mod17);
+ __ b(&mod19);
+ __ b(&mod21);
+ __ b(&mod23);
+ __ b(&mod25);
+ }
+
+ // For each denominator we find a multiple that is almost only ones
+ // when expressed in binary. Then we do the sum-of-digits trick for
+ // that number. If the multiple is not 1 then we have to do a little
+ // more work afterwards to get the answer into the 0 to denominator-1
+ // range.
+ DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11.
+ __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111.
+ ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111.
+ __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111.
+ ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111.
+ ModReduce(masm, lhs_, 0x3f, 11);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111.
+ ModReduce(masm, lhs_, 0xff, 13);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111.
+ __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111.
+ ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111.
+ ModReduce(masm, lhs_, 0xff, 19);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111.
+ ModReduce(masm, lhs_, 0x3f, 21);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101.
+ ModReduce(masm, lhs_, 0xff, 23);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+
+ DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101.
+ ModReduce(masm, lhs_, 0x7f, 25);
+ ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
+}
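Taken together with the call-site setup further down (rhs = odd * 2^shift with the odd part between 3 and 25), the stub computes lhs % rhs by keeping the low shift bits of lhs and reducing the rest modulo the odd part. A compact C++ model of that end-to-end behaviour (IntegerModModel is an illustrative name; it uses the C++ % operator where the stub uses the digit-sum cases above):

#include <cassert>
#include <cstdint>

// rhs = odd * 2^shift with shift >= 1 (the smi tag guarantees one trailing
// zero). The low shift bits of lhs pass through unchanged (mask_bits); the
// rest is reduced modulo the odd part and shifted back up, which is exactly
// ModAnswer's result = mask_bits + (sum_of_digits << shift_distance).
static uint32_t IntegerModModel(uint32_t lhs, uint32_t rhs) {
  int shift = 0;
  while ((rhs & 1) == 0) { rhs >>= 1; shift++; }  // rhs is now the odd part.
  uint32_t mask_bits = lhs & ((1u << shift) - 1);
  uint32_t reduced = (lhs >> shift) % rhs;        // Stand-in for the digit-sum cases.
  return mask_bits + (reduced << shift);
}

int main() {
  for (uint32_t lhs = 0; lhs < 5000; lhs++) {
    assert(IntegerModModel(lhs, 12) == lhs % 12);    // 12 = 3 * 2^2
    assert(IntegerModModel(lhs, 38) == lhs % 38);    // 38 = 19 * 2
    assert(IntegerModModel(lhs, 100) == lhs % 100);  // 100 = 25 * 2^2
  }
  return 0;
}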
+
+
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
@@ -8096,7 +8518,7 @@
case Token::MOD: {
Label not_smi;
if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
- Label smi_is_unsuitable;
+ Label lhs_is_unsuitable;
__ BranchOnNotSmi(lhs, ¬_smi);
if (IsPowerOf2(constant_rhs_)) {
if (op_ == Token::MOD) {
@@ -8117,14 +8539,14 @@
__ eor(rhs, rhs, Operand(0x80000000u), SetCC);
// Next two instructions are conditional on the answer being -0.
__ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
- __ b(eq, &smi_is_unsuitable);
+ __ b(eq, &lhs_is_unsuitable);
// We need to subtract the dividend. Eg. -3 % 4 == -3.
__ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
} else {
ASSERT(op_ == Token::DIV);
__ tst(lhs,
Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
- __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
+ __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder.
int shift = 0;
int d = constant_rhs_;
while ((d & 1) == 0) {
@@ -8137,7 +8559,7 @@
} else {
// Not a power of 2.
__ tst(lhs, Operand(0x80000000u));
- __ b(ne, &smi_is_unsuitable);
+ __ b(ne, &lhs_is_unsuitable);
// Find a fixed point reciprocal of the divisor so we can divide by
// multiplying.
double divisor = 1.0 / constant_rhs_;
@@ -8172,7 +8594,7 @@
// (lhs / rhs) where / indicates integer division.
if (op_ == Token::DIV) {
__ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
- __ b(ne, &smi_is_unsuitable); // There was a remainder.
+ __ b(ne, &lhs_is_unsuitable); // There was a remainder.
__ mov(result, Operand(scratch2, LSL, kSmiTagSize));
} else {
ASSERT(op_ == Token::MOD);
@@ -8180,14 +8602,21 @@
}
}
__ Ret();
- __ bind(&smi_is_unsuitable);
+ __ bind(&lhs_is_unsuitable);
} else if (op_ == Token::MOD &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS) {
// Do generate a bit of smi code for modulus even though the default for
// modulus is not to do it, but as the ARM processor has no coprocessor
- // support for modulus checking for smis makes sense.
+ // support for modulus, checking for smis makes sense. We can handle
+ // 1 to 25 times any power of 2. This covers over half the numbers from
+ // 1 to 100 including all of the first 25. (Actually the constants < 10
+ // are handled above by reciprocal multiplication. We only get here for
+ // those cases if the right hand side is not a constant or for cases
+ // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
+ // stub.)
Label slow;
+ Label not_power_of_2;
ASSERT(!ShouldGenerateSmiCode());
ASSERT(kSmiTag == 0); // Adjust code below.
// Check for two positive smis.
@@ -8195,13 +8624,42 @@
__ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
__ b(ne, &slow);
// Check that rhs is a power of two and not zero.
+ Register mask_bits = r3;
__ sub(scratch, rhs, Operand(1), SetCC);
__ b(mi, &slow);
- __ tst(rhs, scratch);
- __ b(ne, &slow);
+ __ and_(mask_bits, rhs, Operand(scratch), SetCC);
+ __ b(ne, ¬_power_of_2);
// Calculate power of two modulus.
__ and_(result, lhs, Operand(scratch));
__ Ret();
+
+ __ bind(¬_power_of_2);
+ __ eor(scratch, scratch, Operand(mask_bits));
+ // At least two bits are set in the modulus. The high one(s) are in
+ // mask_bits and the low one is scratch + 1.
+ __ and_(mask_bits, scratch, Operand(lhs));
+ Register shift_distance = scratch;
+ scratch = no_reg;
+
+ // The rhs consists of a power of 2 multiplied by some odd number.
+ // The power-of-2 part we handle by putting the corresponding bits
+ // from the lhs in the mask_bits register, and the power in the
+ // shift_distance register. Shift distance is never 0 due to Smi
+ // tagging.
+ __ CountLeadingZeros(r4, shift_distance, shift_distance);
+ __ rsb(shift_distance, r4, Operand(32));
+
+ // Now we need to find out what the odd number is. The last bit is
+ // always 1.
+ Register odd_number = r4;
+ __ mov(odd_number, Operand(rhs, LSR, shift_distance));
+ __ cmp(odd_number, Operand(25));
+ __ b(gt, &slow);
+
+ IntegerModStub stub(
+ result, shift_distance, odd_number, mask_bits, lhs, r5);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call.
+
__ bind(&slow);
}
HandleBinaryOpSlowCases(
@@ -8499,12 +8957,21 @@
__ mov(r0, Operand(r2));
}
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
} else {
UNIMPLEMENTED();
}
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index be4d556..855723d 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,6 +44,7 @@
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
+enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -------------------------------------------------------------------------
@@ -100,7 +101,7 @@
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
+ void SetValue(InitState init_state, WriteBarrierCharacter wb);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards get then dup it now. Otherwise mark
@@ -276,7 +277,9 @@
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
- static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ return FLAG_debug_code ? 27 : 13;
+ }
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
private:
@@ -382,7 +385,7 @@
// Store a keyed property. Key and receiver are on the stack and the value is
// in r0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type);
+ void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
@@ -879,6 +882,102 @@
};
+// This stub can do a fast mod operation without using fp.
+// It is tail called from the GenericBinaryOpStub and it always
+// returns an answer. It never causes GC so it doesn't need a real frame.
+//
+// The inputs are always positive Smis. This is never called
+// where the denominator is a power of 2. We handle that separately.
+//
+// If we consider the denominator as an odd number multiplied by a power of 2,
+// then:
+// * The exponent (power of 2) is in the shift_distance register.
+// * The odd number is in the odd_number register. It is always in the range
+// of 3 to 25.
+// * The bits from the numerator that are to be copied to the answer (there are
+// shift_distance of them) are in the mask_bits register.
+ // * The other bits of the numerator are in the lhs register; the stub shifts
+ // them down by shift_distance itself.
+class IntegerModStub : public CodeStub {
+ public:
+ IntegerModStub(Register result,
+ Register shift_distance,
+ Register odd_number,
+ Register mask_bits,
+ Register lhs,
+ Register scratch)
+ : result_(result),
+ shift_distance_(shift_distance),
+ odd_number_(odd_number),
+ mask_bits_(mask_bits),
+ lhs_(lhs),
+ scratch_(scratch) {
+ // We don't code these in the minor key, so they should always be the same.
+ // We don't really want to fix that since this stub is rather large and we
+ // don't want many copies of it.
+ ASSERT(shift_distance_.is(r9));
+ ASSERT(odd_number_.is(r4));
+ ASSERT(mask_bits_.is(r3));
+ ASSERT(scratch_.is(r5));
+ }
+
+ private:
+ Register result_;
+ Register shift_distance_;
+ Register odd_number_;
+ Register mask_bits_;
+ Register lhs_;
+ Register scratch_;
+
+ // Minor key encoding in 16 bits.
+ class ResultRegisterBits: public BitField<int, 0, 4> {};
+ class LhsRegisterBits: public BitField<int, 4, 4> {};
+
+ Major MajorKey() { return IntegerMod; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return ResultRegisterBits::encode(result_.code())
+ | LhsRegisterBits::encode(lhs_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "IntegerModStub"; }
+
+ // Utility functions.
+ void DigitSum(MacroAssembler* masm,
+ Register lhs,
+ int mask,
+ int shift,
+ Label* entry);
+ void DigitSum(MacroAssembler* masm,
+ Register lhs,
+ Register scratch,
+ int mask,
+ int shift1,
+ int shift2,
+ Label* entry);
+ void ModGetInRangeBySubtraction(MacroAssembler* masm,
+ Register lhs,
+ int shift,
+ int rhs);
+ void ModReduce(MacroAssembler* masm,
+ Register lhs,
+ int max,
+ int denominator);
+ void ModAnswer(MacroAssembler* masm,
+ Register result,
+ Register shift_distance,
+ Register mask_bits,
+ Register sum_of_digits);
+
+
+#ifdef DEBUG
+ void Print() { PrintF("IntegerModStub\n"); }
+#endif
+};
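For illustration (the values are hypothetical, not from the patch): computing 109 % 12 arrives here with the smis lhs = 218 and rhs = 24 = 3 * 2^3, so shift_distance holds 3, odd_number holds 3 and mask_bits holds 218 & 7 = 2; the stub's first instruction shifts the lhs register down to 218 >> 3 = 27, the mod-3 case reduces that to 0, and ModAnswer returns 2 + (0 << 3) = 2, the smi encoding of 109 % 12 = 1.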
+
+
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 4e186d1..002e4c1 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -85,7 +85,7 @@
const char* VFPRegisters::Name(int reg, bool is_double) {
ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
- return names_[reg + is_double ? kNumVFPSingleRegisters : 0];
+ return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 4005369..a52417b 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1038,7 +1038,8 @@
// Dd = vmul(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
-// VMRS
+// vmrs
+// Dd = vsqrt(Dm)
void Decoder::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
@@ -1046,7 +1047,14 @@
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ // vmov register to register.
+ if (instr->SzField() == 0x1) {
+ Format(instr, "vmov.f64'cond 'Dd, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
@@ -1056,6 +1064,8 @@
} else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1)) {
DecodeVCMP(instr);
+ } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+ Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index c6de4d8..97e6148 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -47,71 +47,80 @@
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC/CallIC GenerateNormal.
-// receiver: Receiver. It is not clobbered if a jump to the miss label is
-// done
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as receiver or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The three scratch registers need to be different from the receiver, name and
-// result.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register receiver,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DictionaryCheck check_dictionary) {
- // Main use of the scratch registers.
- // scratch1: Used to hold the property dictionary.
- // scratch2: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch3: Used as temporary.
- Label done;
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, global_object);
+ __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(eq, global_object);
+ __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ b(eq, global_object);
+}
- // Check for the absence of an interceptor.
- // Load the map into scratch1.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
- // Bail out if the receiver has a named interceptor.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register t0,
+ Register t1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // elements: holds the property dictionary on fall through.
+ // Scratch registers:
+ //   t0: used to hold the receiver map.
+ //   t1: used to hold the receiver instance type, receiver bit mask and
+ // elements map.
+
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, t1, miss);
+
+ // Check that the receiver does not require access checks and has no named interceptor.
+ __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
__ b(nz, miss);
- // Bail out if we have a JS global proxy object.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, miss);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(t1, ip);
+ __ b(nz, miss);
+}
- // Possible work-around for http://crbug.com/16276.
- // See also: http://codereview.chromium.org/155418.
- __ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, miss);
- __ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, miss);
- // Load the properties array.
- __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- if (check_dictionary == CHECK_DICTIONARY) {
- __ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(scratch2, ip);
- __ b(ne, miss);
- }
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
- __ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize)); // convert smi to int
- __ sub(scratch2, scratch2, Operand(1));
+ __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(scratch1, scratch1, Operand(1));
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
@@ -122,44 +131,141 @@
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
+ __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
- __ add(scratch3, scratch3, Operand(
+ __ add(scratch2, scratch2, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));
+ __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
- // scratch3 = scratch3 * 3.
- __ add(scratch3, scratch3, Operand(scratch3, LSL, 1));
+ // scratch2 = scratch2 * 3.
+ __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
- __ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
+ __ add(scratch2, elements, Operand(scratch2, LSL, 2));
+ __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip));
if (i != kProbes - 1) {
- __ b(eq, &done);
+ __ b(eq, done);
} else {
__ b(ne, miss);
}
}
+}
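Per the comment above, probe i inspects index (hash + i + i*i) & mask and each dictionary entry spans three words (key, value, details). A small host-side C++ sketch of the same probe sequence (names and the toy table are illustrative):

#include <cstdint>

static const int kEntrySizeModel = 3;  // key, value, details
static const int kProbesModel = 4;

// Returns the element-array offset of 'name', or -1 after four misses (the
// generated code branches to the miss label instead of returning).
static int ProbeModel(const uint32_t* keys, int capacity, uint32_t name, uint32_t hash) {
  uint32_t mask = capacity - 1;  // Capacity is a power of two.
  for (int i = 0; i < kProbesModel; i++) {
    uint32_t index = (hash + i + i * i) & mask;  // Probe offsets 0, 2, 6, 12.
    if (keys[index * kEntrySizeModel] == name) return index * kEntrySizeModel;
  }
  return -1;
}

int main() {
  uint32_t keys[8 * kEntrySizeModel] = {0};
  keys[(123u & 7) * kEntrySizeModel] = 42;  // Plant key 42 at its first probe slot.
  return ProbeModel(keys, 8, 42, 123u) >= 0 ? 0 : 1;
}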
- // Check that the value is a normal property.
- __ bind(&done); // scratch3 == scratch1 + 4 * index
- __ ldr(scratch2,
- FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
- __ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
__ ldr(result,
- FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
+ __ b(ne, miss);
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ str(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1);
}
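The load and store helpers above walk the same three-word entries; a schematic C++ view of that layout and of the details-word checks (the struct and the example mask values are illustrative, the real masks come from PropertyDetails):

#include <cstdint>

// One StringDictionary entry: three consecutive tagged words.
struct DictionaryEntryModel {
  uint32_t key;      // kElementsStartOffset + 0 * kPointerSize
  uint32_t value;    // kElementsStartOffset + 1 * kPointerSize
  uint32_t details;  // kElementsStartOffset + 2 * kPointerSize, stored as a smi
};

// The load helper only requires the property type field to be zero (a normal
// property); the store helper additionally requires READ_ONLY to be clear.
static bool CanLoad(uint32_t details, uint32_t type_mask) {
  return (details & (type_mask << 1)) == 0;  // << 1 mirrors << kSmiTagSize.
}

static bool CanStore(uint32_t details, uint32_t type_mask, uint32_t read_only_bit) {
  return (details & ((type_mask | read_only_bit) << 1)) == 0;
}

int main() {
  DictionaryEntryModel e = { 0, 0, 0 };  // A plain, writable property.
  return (CanLoad(e.details, 0x7) && CanStore(e.details, 0x7, 0x8)) ? 0 : 1;
}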
@@ -310,6 +416,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow);
@@ -317,8 +424,9 @@
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
- __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- __ b(ne, slow);
+ __ tst(scratch2,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ b(nz, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
@@ -502,13 +610,11 @@
}
-static void GenerateNormalHelper(MacroAssembler* masm,
- int argc,
- bool is_global_object,
- Label* miss,
- Register scratch) {
- // Search dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss,
+ Register scratch) {
+ // r1: function
// Check that the value isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
@@ -518,13 +624,6 @@
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
__ b(ne, miss);
- // Patch the receiver with the global proxy if necessary.
- if (is_global_object) {
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r0, MemOperand(sp, argc * kPointerSize));
- }
-
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(r1, actual, JUMP_FUNCTION);
@@ -536,53 +635,18 @@
// -- r2 : name
// -- lr : return address
// -----------------------------------
- Label miss, global_object, non_global_object;
+ Label miss;
// Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
- // Check that the receiver is a valid JS object. Put the map in r3.
- __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &miss);
+ // r0: elements
+ // Search the dictionary - put result in register r1.
+ GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object.
- __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, &global_object);
- __ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &non_global_object);
-
- // Accessing global object: Load and invoke.
- __ bind(&global_object);
- // Check that the global object does not require access checks.
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &miss);
- GenerateNormalHelper(masm, argc, true, &miss, r4);
-
- // Accessing non-global object: Check for access to global proxy.
- Label global_proxy, invoke;
- __ bind(&non_global_object);
- __ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, &global_proxy);
- // Check that the non-global, non-global-proxy object does not
- // require access checks.
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &miss);
- __ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss, r4);
-
- // Global object access: Check access rights.
- __ bind(&global_proxy);
- __ CheckAccessGlobalProxy(r1, r0, &miss);
- __ b(&invoke);
+ GenerateFunctionTailCall(masm, argc, &miss, r4);
__ bind(&miss);
}
@@ -594,6 +658,12 @@
// -- lr : return address
// -----------------------------------
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(&Counters::call_miss, 1, r3, r4);
+ } else {
+ __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
+ }
+
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
@@ -614,23 +684,26 @@
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &invoke);
+ __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
+ __ b(eq, &global);
+ __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(ne, &invoke);
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc * kPointerSize));
+ __ bind(&invoke);
+ }
// Invoke the function.
ParameterCount actual(argc);
- __ bind(&invoke);
__ InvokeFunction(r1, actual, JUMP_FUNCTION);
}
@@ -698,7 +771,8 @@
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call);
+ GenerateKeyedLoadReceiverCheck(
+ masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad(
masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
@@ -708,14 +782,7 @@
// receiver in r1 is not used after this point.
// r2: key
// r1: function
-
- // Check that the value in r1 is a JSFunction.
- __ BranchOnSmi(r1, &slow_call);
- __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
- __ b(ne, &slow_call);
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+ GenerateFunctionTailCall(masm, argc, &slow_call, r0);
__ bind(&check_number_dictionary);
// r2: key
@@ -751,16 +818,16 @@
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache);
+ GenerateKeyedLoadReceiverCheck(
+ masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
- __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &lookup_monomorphic_cache);
- GenerateDictionaryLoad(
- masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
__ jmp(&do_call);
@@ -826,36 +893,14 @@
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
- Label miss, probe, global;
+ Label miss;
- // Check that the receiver isn't a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
- // Check that the receiver is a valid JS object. Put the map in r3.
- __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &miss);
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object (unlikely).
- __ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, &global);
-
- // Check for non-global object that requires access check.
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &miss);
-
- __ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
+ // r1: elements
+ GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
__ Ret();
- // Global object access: Check access rights.
- __ bind(&global);
- __ CheckAccessGlobalProxy(r0, r1, &miss);
- __ b(&probe);
-
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -870,6 +915,8 @@
// -- sp[0] : receiver
// -----------------------------------
+ __ IncrementCounter(&Counters::load_miss, 1, r3, r4);
+
__ mov(r3, r0);
__ Push(r3, r2);
@@ -963,7 +1010,7 @@
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address -
- (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
+ (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
@@ -1013,6 +1060,8 @@
// -- r1 : receiver
// -----------------------------------
+ __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
+
__ Push(r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
@@ -1045,14 +1094,15 @@
Register key = r0;
Register receiver = r1;
- GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
-
// Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
+
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
@@ -1095,12 +1145,15 @@
__ bind(&check_string);
GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
+
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
+ __ cmp(r4, ip);
__ b(eq, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
@@ -1148,9 +1201,14 @@
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
+ // r1: receiver
+ // r0: key
+ // r3: elements
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
- GenerateDictionaryLoad(
- masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
__ Ret();
@@ -1744,7 +1802,7 @@
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
- __ CountLeadingZeros(ival, scratch1, zeros);
+ __ CountLeadingZeros(zeros, ival, scratch1);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
@@ -2160,6 +2218,27 @@
}
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
+
+ GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
+ __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
+ GenerateMiss(masm);
+}
+
+
#undef __
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 86198fb..c6eb628 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -61,9 +61,17 @@
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
+ // Zap the fall-through frame since the jump was unconditional.
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
}
+ if (entry_label_.is_bound()) {
+ // You can't jump backwards to an already bound label unless you admitted
+ // up front that this was a bidirectional jump target. Bidirectional jump
+ // targets will zap their type info when bound in case some later virtual
+ // frame with less precise type info branches to them.
+ ASSERT(direction_ != FORWARD_ONLY);
+ }
__ jmp(&entry_label_);
}
@@ -83,6 +91,13 @@
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
+ if (entry_label_.is_bound()) {
+ // You can't branch backwards to an already bound label unless you admitted
+ // up front that this was a bidirectional jump target. Bidirectional jump
+ // targets will zap their type info when bound in case some later virtual
+ // frame with less precise type info branches to them.
+ ASSERT(direction_ != FORWARD_ONLY);
+ }
__ b(cc, &entry_label_);
if (cc == al) {
cgen()->DeleteFrame();
@@ -121,6 +136,7 @@
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (cgen()->has_valid_frame()) {
+ if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
// If there is a current frame we can use it on the fall through.
if (!entry_frame_set_) {
entry_frame_ = *cgen()->frame();
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 29e168c..81fc11e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -310,32 +310,28 @@
void MacroAssembler::RecordWriteHelper(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
+ Register address,
+ Register scratch) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch1, ne, ¬_in_new_space);
+ InNewSpace(object, scratch, ne, ¬_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(¬_in_new_space);
}
- // Add offset into the object.
- add(scratch0, object, offset);
-
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
- Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
- str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch, scratch, Operand(ip, LSL, address));
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
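The page and region arithmetic performed by the rewritten helper can be sketched in C++; the constants below are illustrative stand-ins for Page::kPageSizeBits and Page::kRegionSizeLog2:

#include <cstdint>

static const int kPageSizeBitsModel = 13;   // Illustrative page size (8K).
static const int kRegionSizeLog2Model = 8;  // Illustrative region size (256 bytes).

// Mirrors the sequence above: clear the page-offset bits of 'object' to get the
// page start (Bfc), extract the region number from the written address (Ubfx),
// and set that bit in the page's dirty-region word.
static void MarkRegionDirtyModel(uintptr_t object, uintptr_t address, uint32_t* dirty_flags) {
  uintptr_t page = object & ~((uintptr_t(1) << kPageSizeBitsModel) - 1);
  uint32_t region = static_cast<uint32_t>(
      (address >> kRegionSizeLog2Model) &
      ((1u << (kPageSizeBitsModel - kRegionSizeLog2Model)) - 1));
  (void)page;  // The generated code loads and stores the dirty word through the page pointer.
  *dirty_flags |= 1u << region;
}

int main() {
  uint32_t dirty = 0;
  MarkRegionDirtyModel(0x20000000u, 0x20000300u, &dirty);  // Offset 0x300 falls in region 3.
  return dirty == (1u << 3) ? 0 : 1;
}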
@@ -368,8 +364,11 @@
// region marks for new space pages.
InNewSpace(object, scratch0, eq, &done);
+ // Add offset into the object.
+ add(scratch0, object, offset);
+
// Record the actual write.
- RecordWriteHelper(object, offset, scratch0, scratch1);
+ RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
@@ -383,6 +382,38 @@
}
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, address, scratch);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
@@ -1369,6 +1400,56 @@
}
+void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
+ DwVfpRegister result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ SwVfpRegister scratch3,
+ Label* not_number,
+ ObjectToDoubleFlags flags) {
+ Label done;
+ if ((flags & OBJECT_NOT_SMI) == 0) {
+ Label not_smi;
+ BranchOnNotSmi(object, ¬_smi);
+ // Remove smi tag and convert to double.
+ mov(scratch1, Operand(object, ASR, kSmiTagSize));
+ vmov(scratch3, scratch1);
+ vcvt_f64_s32(result, scratch3);
+ b(&done);
+ bind(¬_smi);
+ }
+ // Check for heap number and load double value from it.
+ ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ cmp(scratch1, heap_number_map);
+ b(ne, not_number);
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
+ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ Sbfx(scratch1,
+ scratch1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // An all-ones value sign-extends to -1.
+ cmp(scratch1, Operand(-1));
+ b(eq, not_number);
+ }
+ vldr(result, scratch2, HeapNumber::kValueOffset);
+ bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
+ DwVfpRegister value,
+ Register scratch1,
+ SwVfpRegister scratch2) {
+ mov(scratch1, Operand(smi, ASR, kSmiTagSize));
+ vmov(scratch2, scratch1);
+ vcvt_f64_s32(value, scratch2);
+}
+
+
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -1548,6 +1629,8 @@
void MacroAssembler::Abort(const char* msg) {
+ Label abort_start;
+ bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
@@ -1571,6 +1654,17 @@
push(r0);
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ if (is_const_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
}
@@ -1673,14 +1767,31 @@
}
-void MacroAssembler::CountLeadingZeros(Register source,
- Register scratch,
- Register zeros) {
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ DwVfpRegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required) {
+ AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
+ sub(scratch1, result, Operand(kHeapObjectTag));
+ vstr(value, scratch1, HeapNumber::kValueOffset);
+}
+
+
+void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
+ Register source, // Input.
+ Register scratch) {
+ ASSERT(!zeros.is(source) || !source.is(zeros));
+ ASSERT(!zeros.is(scratch));
+ ASSERT(!scratch.is(ip));
+ ASSERT(!source.is(ip));
+ ASSERT(!zeros.is(ip));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
mov(zeros, Operand(0));
- mov(scratch, source);
+ Move(scratch, source);
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
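The pre-ARMv5 fallback continues with the same pattern for the top 8, 4, 2 and 1 bits; the whole binary search can be written out in C++ as follows (a sketch of the approach, not the emitted code):

#include <cassert>
#include <cstdint>

// Software count-leading-zeros: whenever the top half of the remaining bits is
// zero, add its width to the count and shift the value up. Like the macro, it
// returns 31 rather than 32 for an input of 0.
static int CountLeadingZerosModel(uint32_t x) {
  int zeros = 0;
  if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }
  if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8; }
  if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4; }
  if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2; }
  if ((x & 0x80000000u) == 0) { zeros += 1; }
  return zeros;
}

int main() {
  assert(CountLeadingZerosModel(1) == 31);
  assert(CountLeadingZerosModel(0x80000000u) == 0);
  assert(CountLeadingZerosModel(0x00010000u) == 15);
  assert(CountLeadingZerosModel(0) == 31);  // The documented wrong answer for 0.
  return 0;
}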
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index e02a6c8..d57c565 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -67,6 +67,17 @@
};
+// Flags used for the ObjectToDoubleVFPRegister function.
+enum ObjectToDoubleFlags {
+ // No special flags.
+ NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+ // Object is known to be a non smi.
+ OBJECT_NOT_SMI = 1 << 0,
+ // Don't load NaNs or infinities, branch to the non number case instead.
+ AVOID_NANS_AND_INFINITIES = 1 << 1
+};
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -126,22 +137,32 @@
Label* branch);
- // For the page containing |object| mark the region covering [object+offset]
+ // For the page containing |object| mark the region covering [address]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1);
+ Register address,
+ Register scratch);
- // For the page containing |object| mark the region covering [object+offset]
- // dirty. The object address must be in the first 8K of an allocated page.
- // The 'scratch' registers are used in the implementation and all 3 registers
- // are clobbered by the operation, as well as the ip register.
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the ip register. RecordWrite updates the
+ // write barrier even when storing smis.
void RecordWrite(Register object,
Operand offset,
Register scratch0,
Register scratch1);
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+ // as well as the ip register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
+
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
@@ -381,6 +402,13 @@
Register scratch2,
Register heap_number_map,
Label* gc_required);
+ void AllocateHeapNumberWithValue(Register result,
+ DwVfpRegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -469,12 +497,35 @@
Register outHighReg,
Register outLowReg);
+ // Load the value of a number object into a VFP double register. If the object
+ // is not a number, a jump to the label not_number is performed and the VFP
+ // double register is unchanged.
+ void ObjectToDoubleVFPRegister(
+ Register object,
+ DwVfpRegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ SwVfpRegister scratch3,
+ Label* not_number,
+ ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+ // Load the value of a smi object into a VFP double register. The register
+ // scratch1 can be the same register as smi in which case smi will hold the
+ // untagged value afterwards.
+ void SmiToDoubleVFPRegister(Register smi,
+ DwVfpRegister value,
+ Register scratch1,
+ SwVfpRegister scratch2);
+
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
- // for 0 (31 instead of 32).
- void CountLeadingZeros(Register source,
- Register scratch,
- Register zeros);
+ // for 0 (31 instead of 32). Source and scratch can be the same in which case
+ // the source is clobbered. Source and zeros can also be the same in which
+ // case scratch should be a different register.
+ void CountLeadingZeros(Register zeros,
+ Register source,
+ Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 77776c2..f09ce00 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <math.h>
#include <cstdarg>
#include "v8.h"
@@ -2262,7 +2263,8 @@
// Dd = vmul(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
-// VMRS
+// vmrs
+// Dd = vsqrt(Dm)
void Simulator::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
@@ -2274,7 +2276,14 @@
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ // vmov register to register.
+ if (instr->SzField() == 0x1) {
+ set_d_register_from_double(vd, get_double_from_d_register(vm));
+ } else {
+ UNREACHABLE(); // Not used by V8.
+ }
+ } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
@@ -2284,6 +2293,11 @@
} else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1)) {
DecodeVCMP(instr);
+ } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+ // vsqrt
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = sqrt(dm_value);
+ set_d_register_from_double(vd, dd_value);
} else {
UNREACHABLE(); // Not used by V8.
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 3e5ba11..0e649cc 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -741,7 +741,8 @@
Register scratch,
String* name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ Register extra) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 8b90f42..2ddfd0f 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -482,6 +482,32 @@
}
+void VirtualFrame::SpillAllButCopyTOSToR1() {
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
+ __ ldr(r1, MemOperand(sp, 0));
+ break;
+ case R0_TOS:
+ __ push(r0);
+ __ mov(r1, r0);
+ break;
+ case R1_TOS:
+ __ push(r1);
+ break;
+ case R0_R1_TOS:
+ __ Push(r1, r0);
+ __ mov(r1, r0);
+ break;
+ case R1_R0_TOS:
+ __ Push(r0, r1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
void VirtualFrame::SpillAllButCopyTOSToR1R0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
@@ -524,6 +550,24 @@
}
+Register VirtualFrame::Peek2() {
+ AssertIsNotSpilled();
+ switch (top_of_stack_state_) {
+ case NO_TOS_REGISTERS:
+ case R0_TOS:
+ case R0_R1_TOS:
+ MergeTOSTo(R0_R1_TOS);
+ return r1;
+ case R1_TOS:
+ case R1_R0_TOS:
+ MergeTOSTo(R1_R0_TOS);
+ return r0;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
void VirtualFrame::Dup() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, 0));
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index d8dc5c6..8eedf22 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -189,12 +189,15 @@
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
}
+ inline void ForgetTypeInfo() {
+ tos_known_smi_map_ = 0;
+ }
+
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
- AssertIsSpilled();
}
// (Re)attach a frame to its code generator. This informs the register
@@ -202,7 +205,6 @@
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
- AssertIsSpilled();
}
// Emit code for the physical JS entry and exit frame sequences. After
@@ -330,6 +332,10 @@
// must be copied to a scratch register before modification.
Register Peek();
+ // Look at the value beneath the top of the stack. The register returned is
+ // aliased and must be copied to a scratch register before modification.
+ Register Peek2();
+
// Duplicate the top of stack.
void Dup();
@@ -339,6 +345,9 @@
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();
+ // Flushes all registers, but it puts a copy of the top-of-stack in r1.
+ void SpillAllButCopyTOSToR1();
+
// Flushes all registers, but it puts a copy of the top-of-stack in r1
// and the next value on the stack in r0.
void SpillAllButCopyTOSToR1R0();
diff --git a/src/array.js b/src/array.js
index 216c03b..f3c0697 100644
--- a/src/array.js
+++ b/src/array.js
@@ -954,7 +954,7 @@
function ArrayIndexOf(element, index) {
var length = this.length;
- if (index == null) {
+ if (IS_UNDEFINED(index)) {
index = 0;
} else {
index = TO_INTEGER(index);
@@ -981,7 +981,7 @@
function ArrayLastIndexOf(element, index) {
var length = this.length;
- if (index == null) {
+ if (%_ArgumentsLength() < 2) {
index = length - 1;
} else {
index = TO_INTEGER(index);
diff --git a/src/ast-inl.h b/src/ast-inl.h
index 2b5d7c4..717f68d 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -45,7 +45,9 @@
IterationStatement::IterationStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+ body_(NULL),
+ continue_target_(JumpTarget::BIDIRECTIONAL) {
}
diff --git a/src/builtins.cc b/src/builtins.cc
index 7116dc9..ad52ea1 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -195,6 +195,7 @@
}
// 'array' now contains the JSArray we should initialize.
+ ASSERT(array->HasFastElements());
// Optimize the case where there is one argument and the argument is a
// small smi.
@@ -1262,6 +1263,11 @@
}
+static void Generate_StoreIC_Normal(MacroAssembler* masm) {
+ StoreIC::GenerateNormal(masm);
+}
+
+
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
}
diff --git a/src/builtins.h b/src/builtins.h
index 1fab375..3dcab62 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -98,6 +98,7 @@
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
+ V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
diff --git a/src/code-stubs.h b/src/code-stubs.h
index de2ad56..e5a222f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -46,6 +46,7 @@
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
+ V(IntegerMod) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
diff --git a/src/date.js b/src/date.js
index e780cb8..83fca27 100644
--- a/src/date.js
+++ b/src/date.js
@@ -347,9 +347,10 @@
function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
- year = TO_INTEGER(year);
- month = TO_INTEGER(month);
- date = TO_INTEGER(date);
+ // Convert to integer and map -0 to 0.
+ year = TO_INTEGER_MAP_MINUS_ZERO(year);
+ month = TO_INTEGER_MAP_MINUS_ZERO(month);
+ date = TO_INTEGER_MAP_MINUS_ZERO(date);
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth ||
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index d5e91cb..47a3c8e 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -236,6 +236,7 @@
this.active_ = true;
this.condition_ = null;
this.ignoreCount_ = 0;
+ this.break_points_ = [];
}
@@ -289,6 +290,15 @@
};
+ScriptBreakPoint.prototype.actual_locations = function() {
+ var locations = [];
+ for (var i = 0; i < this.break_points_.length; i++) {
+ locations.push(this.break_points_[i].actual_location);
+ }
+ return locations;
+}
+
+
ScriptBreakPoint.prototype.update_positions = function(line, column) {
this.line_ = line;
this.column_ = column;
@@ -334,10 +344,8 @@
this.ignoreCount_ = ignoreCount;
// Set ignore count on all break points created from this script break point.
- for (var i = 0; i < break_points.length; i++) {
- if (break_points[i].script_break_point() === this) {
- break_points[i].setIgnoreCount(ignoreCount);
- }
+ for (var i = 0; i < this.break_points_.length; i++) {
+ this.break_points_[i].setIgnoreCount(ignoreCount);
}
};
@@ -379,20 +387,23 @@
}
// Convert the line and column into an absolute position within the script.
- var pos = Debug.findScriptSourcePosition(script, this.line(), column);
+ var position = Debug.findScriptSourcePosition(script, this.line(), column);
// If the position is not found in the script (the script might be shorter
// than it used to be) just ignore it.
- if (pos === null) return;
+ if (position === null) return;
// Create a break point object and set the break point.
- break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
+ break_point = MakeBreakPoint(position, this.line(), this.column(), this);
break_point.setIgnoreCount(this.ignoreCount());
- pos = %SetScriptBreakPoint(script, pos, break_point);
- if (!IS_UNDEFINED(pos)) {
- this.actual_location = script.locationFromPosition(pos);
+ var actual_position = %SetScriptBreakPoint(script, position, break_point);
+ if (IS_UNDEFINED(actual_position)) {
+ actual_position = position;
}
-
+ var actual_location = script.locationFromPosition(actual_position, true);
+ break_point.actual_location = { line: actual_location.line,
+ column: actual_location.column };
+ this.break_points_.push(break_point);
return break_point;
};
@@ -409,6 +420,7 @@
}
}
break_points = remaining_break_points;
+ this.break_points_ = [];
};
@@ -554,6 +566,19 @@
}
};
+Debug.findBreakPointActualLocations = function(break_point_number) {
+ for (var i = 0; i < script_break_points.length; i++) {
+ if (script_break_points[i].number() == break_point_number) {
+ return script_break_points[i].actual_locations();
+ }
+ }
+ for (var i = 0; i < break_points.length; i++) {
+ if (break_points[i].number() == break_point_number) {
+ return [break_points[i].actual_location];
+ }
+ }
+ return [];
+}
Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
@@ -585,7 +610,12 @@
} else {
// Set a break point directly on the function.
var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
- %SetFunctionBreakPoint(func, source_position, break_point);
+ var actual_position =
+ %SetFunctionBreakPoint(func, source_position, break_point);
+ actual_position += this.sourcePosition(func);
+ var actual_location = script.locationFromPosition(actual_position, true);
+ break_point.actual_location = { line: actual_location.line,
+ column: actual_location.column };
break_point.setCondition(opt_condition);
return break_point.number();
}
@@ -1482,8 +1512,10 @@
}
response.body.line = break_point.line();
response.body.column = break_point.column();
+ response.body.actual_locations = break_point.actual_locations();
} else {
response.body.type = 'function';
+ response.body.actual_locations = [break_point.actual_location];
}
};
@@ -1598,7 +1630,8 @@
hit_count: break_point.hit_count(),
active: break_point.active(),
condition: break_point.condition(),
- ignoreCount: break_point.ignoreCount()
+ ignoreCount: break_point.ignoreCount(),
+ actual_locations: break_point.actual_locations()
}
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
@@ -2037,6 +2070,7 @@
return response.failed('Missing arguments');
}
var script_id = request.arguments.script_id;
+ var preview_only = !!request.arguments.preview_only;
var scripts = %DebugGetLoadedScripts();
@@ -2059,18 +2093,9 @@
var new_source = request.arguments.new_source;
- try {
- Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
- } catch (e) {
- if (e instanceof Debug.LiveEdit.Failure) {
- // Let's treat it as a "success" so that body with change_log will be
- // sent back. "change_log" will have "failure" field set.
- change_log.push( { failure: true, message: e.toString() } );
- } else {
- throw e;
- }
- }
- response.body = {change_log: change_log};
+ var result_description = Debug.LiveEdit.SetScriptSource(the_script,
+ new_source, preview_only, change_log);
+ response.body = {change_log: change_log, result: result_description};
};
diff --git a/src/debug.cc b/src/debug.cc
index d513b31..1dc6275 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -684,6 +684,12 @@
void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+  // We need to clear all breakpoints associated with the function to restore
+  // the original code and avoid patching the code twice later, because the
+  // function will live in the heap until the next GC and can be found by
+  // Runtime::FindSharedFunctionInfoInScript.
+ BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+ it.ClearAllDebugBreak();
RemoveDebugInfo(node->debug_info());
#ifdef DEBUG
node = Debug::debug_info_list_;
@@ -854,7 +860,7 @@
HandleScope scope;
ASSERT(args.length() == 0);
- thread_local_.frames_are_dropped_ = false;
+ thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it;
@@ -932,12 +938,22 @@
PrepareStep(step_action, step_count);
}
- if (thread_local_.frames_are_dropped_) {
- // We must have been calling IC stub. Do not return there anymore.
+ if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
+ SetAfterBreakTarget(frame);
+ } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_IC_CALL) {
+    // We must have been calling an IC stub. Do not go there anymore.
Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
+ } else if (thread_local_.frame_drop_mode_ ==
+ FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
+    // The debug break slot stub does not return normally; instead it manually
+    // cleans the stack and jumps. We should patch the jump address.
+ Code* plain_return = Builtins::builtin(Builtins::FrameDropper_LiveEdit);
+ thread_local_.after_break_target_ = plain_return->entry();
+ } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_DIRECT_CALL) {
+ // Nothing to do, after_break_target is not used here.
} else {
- SetAfterBreakTarget(frame);
+ UNREACHABLE();
}
return Heap::undefined_value();
@@ -1749,8 +1765,9 @@
}
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
- thread_local_.frames_are_dropped_ = true;
+void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ FrameDropMode mode) {
+ thread_local_.frame_drop_mode_ = mode;
thread_local_.break_frame_id_ = new_break_frame_id;
}
diff --git a/src/debug.h b/src/debug.h
index 6019294..fb92692 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -400,7 +400,22 @@
// Called from stub-cache.cc.
static void GenerateCallICDebugBreak(MacroAssembler* masm);
- static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id);
+  // Describes how exactly a frame has been dropped from the stack.
+ enum FrameDropMode {
+ // No frame has been dropped.
+ FRAMES_UNTOUCHED,
+    // The top JS frame had been calling an IC stub; it mustn't be called now.
+ FRAME_DROPPED_IN_IC_CALL,
+    // The top JS frame had been calling the debug break slot stub. Patch the
+    // address this stub jumps to at the end.
+ FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
+ // The top JS frame had been calling some C++ function. The return address
+ // gets patched automatically.
+ FRAME_DROPPED_IN_DIRECT_CALL
+ };
+
+ static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+ FrameDropMode mode);
static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code);
@@ -471,8 +486,9 @@
// Storage location for jump when exiting debug break calls.
Address after_break_target_;
- // Indicates that LiveEdit has patched the stack.
- bool frames_are_dropped_;
+  // Stores how LiveEdit has patched the stack. It is used when the debugger
+  // returns control back to the user script.
+ FrameDropMode frame_drop_mode_;
// Top debugger entry.
EnterDebugger* debugger_entry_;
diff --git a/src/factory.cc b/src/factory.cc
index 35d3c54..f6b93b0 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -274,11 +274,22 @@
return copy;
}
+
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
}
+Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->GetFastElementsMap(), Map);
+}
+
+
+Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
+ CALL_HEAP_FUNCTION(src->GetSlowElementsMap(), Map);
+}
+
+
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
}
diff --git a/src/factory.h b/src/factory.h
index 8a190fa..b0a0571 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -180,6 +180,10 @@
static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
+ static Handle<Map> GetFastElementsMap(Handle<Map> map);
+
+ static Handle<Map> GetSlowElementsMap(Handle<Map> map);
+
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (eg, literals) are pretenured by the parser.
diff --git a/src/globals.h b/src/globals.h
index 6cf2626..aea8858 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -463,6 +463,12 @@
};
+enum InlineCacheHolderFlag {
+ OWN_MAP, // For fast properties objects.
+ PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
+};
+
+
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
diff --git a/src/handles.cc b/src/handles.cc
index c90365c..f2adab7 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -197,7 +197,17 @@
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->TryFlatten());
+}
+
+
+Handle<String> FlattenGetString(Handle<String> string) {
+ Handle<String> result;
+ CALL_AND_RETRY(string->TryFlatten(),
+ { result = Handle<String>(String::cast(__object__));
+ break; },
+ return Handle<String>());
ASSERT(string->IsFlat());
+ return result;
}
diff --git a/src/handles.h b/src/handles.h
index 96b17a6..1e14daf 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -193,8 +193,14 @@
void NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
+
+// Flattens a string.
void FlattenString(Handle<String> str);
+// Flattens a string and returns the underlying external or sequential
+// string.
+Handle<String> FlattenGetString(Handle<String> str);
+
Handle<Object> SetProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
diff --git a/src/heap.cc b/src/heap.cc
index f1ec56c..6ae46f2 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -126,6 +126,12 @@
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;
+int Heap::young_survivors_after_last_gc_ = 0;
+int Heap::high_survival_rate_period_length_ = 0;
+double Heap::survival_rate_ = 0;
+Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
+Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
+
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;
@@ -582,6 +588,29 @@
}
#endif
+void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+ double survival_rate =
+ (static_cast<double>(young_survivors_after_last_gc_) * 100) /
+ start_new_space_size;
+
+ if (survival_rate > kYoungSurvivalRateThreshold) {
+ high_survival_rate_period_length_++;
+ } else {
+ high_survival_rate_period_length_ = 0;
+ }
+
+ double survival_rate_diff = survival_rate_ - survival_rate;
+
+ if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
+ set_survival_rate_trend(DECREASING);
+ } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
+ set_survival_rate_trend(INCREASING);
+ } else {
+ set_survival_rate_trend(STABLE);
+ }
+
+ survival_rate_ = survival_rate;
+}
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
@@ -604,6 +633,8 @@
EnsureFromSpaceIsCommitted();
+ int start_new_space_size = Heap::new_space()->Size();
+
if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) {
// Flush all potentially unused code.
@@ -613,16 +644,36 @@
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
+ bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
+ IsStableOrIncreasingSurvivalTrend();
+
+ UpdateSurvivalRateTrend(start_new_space_size);
+
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+
+ if (high_survival_rate_during_scavenges &&
+ IsStableOrIncreasingSurvivalTrend()) {
+      // Stable high survival rates of young objects both during partial and
+      // full collections indicate that the mutator is either building or
+      // modifying a structure with a long lifetime. In this case we
+      // aggressively raise old generation memory limits to postpone
+      // subsequent mark-sweep collections and thus trade memory space for
+      // mutation speed.
+ old_gen_promotion_limit_ *= 2;
+ old_gen_allocation_limit_ *= 2;
+ }
+
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
Scavenge();
tracer_ = NULL;
+
+ UpdateSurvivalRateTrend(start_new_space_size);
}
Counters::objs_since_last_young.Set(0);
@@ -1217,7 +1268,7 @@
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -2545,6 +2596,7 @@
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
+ ASSERT(map->has_fast_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@@ -2598,8 +2650,8 @@
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
- // Both types of globla objects should be allocated using
- // AllocateGloblaObject to be properly initialized.
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
@@ -2623,6 +2675,7 @@
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
+ ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
diff --git a/src/heap.h b/src/heap.h
index a8f8c34..df3ba0e 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1005,6 +1005,7 @@
static void CheckNewSpaceExpansionCriteria();
static inline void IncrementYoungSurvivorsCounter(int survived) {
+ young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
@@ -1272,6 +1273,55 @@
// be replaced with a lazy compilable version.
static void FlushCode();
+ static void UpdateSurvivalRateTrend(int start_new_space_size);
+
+ enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
+
+ static const int kYoungSurvivalRateThreshold = 90;
+ static const int kYoungSurvivalRateAllowedDeviation = 15;
+
+ static int young_survivors_after_last_gc_;
+ static int high_survival_rate_period_length_;
+ static double survival_rate_;
+ static SurvivalRateTrend previous_survival_rate_trend_;
+ static SurvivalRateTrend survival_rate_trend_;
+
+ static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
+ ASSERT(survival_rate_trend != FLUCTUATING);
+ previous_survival_rate_trend_ = survival_rate_trend_;
+ survival_rate_trend_ = survival_rate_trend;
+ }
+
+ static SurvivalRateTrend survival_rate_trend() {
+ if (survival_rate_trend_ == STABLE) {
+ return STABLE;
+ } else if (previous_survival_rate_trend_ == STABLE) {
+ return survival_rate_trend_;
+ } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
+ return FLUCTUATING;
+ } else {
+ return survival_rate_trend_;
+ }
+ }
+
+ static bool IsStableOrIncreasingSurvivalTrend() {
+ switch (survival_rate_trend()) {
+ case STABLE:
+ case INCREASING:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool IsIncreasingSurvivalTrend() {
+ return survival_rate_trend() == INCREASING;
+ }
+
+ static bool IsHighSurvivalRate() {
+ return high_survival_rate_period_length_ > 0;
+ }
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
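
The heap changes above add a small feedback loop: after every collection the young-generation survival rate is recomputed, a high-rate streak is tracked, and the trend is classified as increasing, stable, or decreasing; a mark-sweep that sees a stable or increasing high survival rate then doubles the old-generation limits to postpone the next full collection. A compact stand-alone sketch of that policy (class and function names here are illustrative and the FLUCTUATING/previous-trend handling from the real code is omitted; the 90% threshold and 15-point deviation mirror the constants above):

#include <cstdio>

enum Trend { INCREASING, STABLE, DECREASING };

// Illustrative tracker mirroring young_survivors_after_last_gc_,
// high_survival_rate_period_length_ and survival_rate_ above.
struct SurvivalTracker {
  double rate = 0;           // last survival rate, in percent
  int high_rate_period = 0;  // consecutive collections above the threshold

  Trend Update(int survivors, int start_new_space_size) {
    double new_rate = 100.0 * survivors / start_new_space_size;
    high_rate_period = (new_rate > 90) ? high_rate_period + 1 : 0;
    double diff = rate - new_rate;
    rate = new_rate;
    if (diff > 15) return DECREASING;   // kYoungSurvivalRateAllowedDeviation
    if (diff < -15) return INCREASING;
    return STABLE;
  }
};

// After a mark-sweep: if scavenges kept a high survival rate and the trend is
// stable or increasing, trade memory for speed by doubling both limits.
void AdjustOldGenLimits(const SurvivalTracker& t, Trend trend,
                        long* promotion_limit, long* allocation_limit) {
  if (t.high_rate_period > 0 && (trend == STABLE || trend == INCREASING)) {
    *promotion_limit *= 2;
    *allocation_limit *= 2;
  }
}

int main() {
  SurvivalTracker tracker;
  long promotion = 1 << 20, allocation = 2 << 20;
  Trend trend = tracker.Update(950, 1000);  // 95% of new space survived
  AdjustOldGenLimits(tracker, trend, &promotion, &allocation);
  std::printf("%ld %ld\n", promotion, allocation);  // both limits doubled
  return 0;
}
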
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 6b07472..fa09dd8 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -8853,7 +8853,7 @@
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
+ Immediate(Factory::null_value()));
deferred->Branch(not_equal);
// Check that the key is a smi.
@@ -8868,9 +8868,11 @@
// is not a dictionary.
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
+ if (FLAG_debug_code) {
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ Assert(equal, "JSObject with fast elements map has slow elements");
+ }
// Check that the key is within bounds.
__ cmp(key.reg(),
@@ -11654,7 +11656,7 @@
void CompareStub::Generate(MacroAssembler* masm) {
- Label call_builtin, done;
+ Label check_unequal_objects, done;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -11684,13 +11686,15 @@
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
} else {
- Label return_equal;
Label heap_number;
- // If it's not a heap number, then return equal.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(equal, &heap_number);
- __ bind(&return_equal);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+      __ j(above_equal, &not_identical);
+ }
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -11730,79 +11734,75 @@
__ bind(&not_identical);
}
- if (cc_ == equal) { // Both strict and non-strict.
+ // Strict equality can quickly decide whether objects are equal.
+ // Non-strict object equality is slower, so it is handled later in the stub.
+ if (cc_ == equal && strict_) {
Label slow; // Fallthrough label.
-
+ Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
-      __ j(not_zero, &not_smis);
- // One operand is a smi.
+ // If either is a Smi (we know that not both are), then they can only
+ // be equal if the other is a HeapNumber. If so, use the slow case.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ mov(ecx, Immediate(kSmiTagMask));
+ __ and_(ecx, Operand(eax));
+ __ test(ecx, Operand(edx));
+    __ j(not_zero, &not_smis);
+ // One operand is a smi.
- // Check whether the non-smi is a heap number.
- ASSERT_EQ(1, kSmiTagMask);
- // ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
- // if eax was smi, ebx is now edx, else eax.
+ // Check whether the non-smi is a heap number.
+ ASSERT_EQ(1, kSmiTagMask);
+ // ecx still holds eax & kSmiTag, which is either zero or one.
+ __ sub(Operand(ecx), Immediate(0x01));
+ __ mov(ebx, edx);
+ __ xor_(ebx, Operand(eax));
+ __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, Operand(eax));
+ // if eax was smi, ebx is now edx, else eax.
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
+ // Check if the non-smi operand is a heap number.
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // If heap number, handle it in the slow case.
+ __ j(equal, &slow);
+ // Return non-equal (ebx is not zero)
+ __ mov(eax, ebx);
+ __ ret(0);
-      __ bind(&not_smis);
- }
+    __ bind(&not_smis);
+ // If either operand is a JSObject or an oddball value, then they are not
+ // equal since their pointers are different
+ // There is no test for undetectability in strict equality.
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different
- // There is no test for undetectability in strict equality.
+ // Get the type of the first operand.
+ // If the first object is a JS object, we have done pointer comparison.
+ Label first_non_object;
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &first_non_object);
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object);
+ // Return non-zero (eax is not zero)
+ Label return_not_equal;
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(&return_not_equal);
+ __ ret(0);
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(above_equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
+ // Check for oddballs: true, false, null, undefined.
+ __ CmpInstanceType(ecx, ODDBALL_TYPE);
+ __ j(equal, &return_not_equal);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
+ // Fall through to the general case.
__ bind(&slow);
}
@@ -11889,7 +11889,8 @@
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+ &check_unequal_objects);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -11902,7 +11903,44 @@
__ Abort("Unexpected fall-through from string comparison");
#endif
- __ bind(&call_builtin);
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Non-strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects;
+ Label return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagMask);
+ __ lea(ecx, Operand(eax, edx, times_1, 0));
+ __ test(ecx, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(below, &not_both_objects);
+ __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+    __ j(below, &not_both_objects);
+ // We do not bail out after this point. Both are JSObjects, and
+ // they are equal if and only if both are undetectable.
+ // The and of the undetectable flags is 1 if and only if they are equal.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(eax, Immediate(EQUAL));
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax,
+ // or return equal if we fell through to here.
+    __ ret(2 * kPointerSize);  // eax, edx were pushed
+    __ bind(&not_both_objects);
+ }
+
// must swap argument order
__ pop(ecx);
__ pop(edx);
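
The new non-strict path above relies on pointer tagging: with kSmiTag == 0 a smi has its low bit clear and a heap-object pointer has it set, so adding the two tagged words exposes whether exactly one of them is a smi (the earlier fast path already rules out two smis). A small illustration of that check in plain C++ (the example addresses are made up):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kSmiTagMask = 1;  // kSmiTag == 0, kHeapObjectTag == 1

// smi + heap object        -> low bit of the sum is 1
// heap object + heap object -> low bit of the sum is 0
bool OneOfThemIsSmi(uintptr_t a, uintptr_t b) {
  return ((a + b) & kSmiTagMask) != 0;
}

int main() {
  uintptr_t smi = 0x10;          // low bit clear: a smi
  uintptr_t heap_obj = 0x1001;   // low bit set: a tagged heap pointer
  uintptr_t heap_obj2 = 0x2001;
  assert(OneOfThemIsSmi(smi, heap_obj));
  assert(!OneOfThemIsSmi(heap_obj, heap_obj2));
  return 0;
}
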
@@ -13293,6 +13331,9 @@
__ test(edx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime);
__ sub(ecx, Operand(edx));
+ __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+ Label return_eax;
+ __ j(equal, &return_eax);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
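
The three added instructions above give SubString a fast path: after the requested length (to - from) is computed in ecx, it is compared against the receiver's length, and if they match the original string is returned directly, so V8 copies no characters. The same idea expressed with std::string instead of V8 strings (purely illustrative; std::string still copies on return, only the logic is mirrored):

#include <cstdio>
#include <string>

// Counterpart of the new fast path: a slice that covers the whole string can
// simply reuse the receiver; anything shorter falls back to extracting a copy.
std::string SubStringSketch(const std::string& s, size_t from, size_t to) {
  size_t requested = to - from;           // ecx after the subtraction above
  if (requested == s.length()) return s;  // new cmp + j(equal, &return_eax)
  return s.substr(from, requested);       // otherwise take the copying path
}

int main() {
  std::printf("%s\n", SubStringSketch("hello", 0, 5).c_str());  // hello
  std::printf("%s\n", SubStringSketch("hello", 1, 3).c_str());  // el
  return 0;
}
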
@@ -13397,6 +13438,8 @@
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi.
+
+ __ bind(&return_eax);
__ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(3 * kPointerSize);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index c750444..13173e2 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2175,7 +2175,7 @@
// LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(ebx, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
__ j(equal, &function);
// Check if the constructor in the map is a function.
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index b0c07b7..062f0f2 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -45,72 +45,77 @@
#define __ ACCESS_MASM(masm)
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if the receiver has fast properties,
-// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Register name,
- Register r0,
- Register r1,
- Register r2,
- Register result,
- DictionaryCheck check_dictionary) {
- // Register use:
- //
- // name - holds the name of the property and is unchanged.
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // r0 - used to hold the property dictionary.
- //
- // r1 - used for the index into the property dictionary
- //
- // r2 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, global_object, not_taken);
+ __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+ __ j(equal, global_object, not_taken);
+ __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, global_object, not_taken);
+}
- Label done;
- // Check for the absence of an interceptor.
- // Load the map into r0.
- __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // r0: used to hold receiver instance type.
+ // Holds the property dictionary on fall through.
+  //   r1: used to hold the receiver's map.
- // Bail out if the receiver has a named interceptor.
- __ test(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss_label, not_taken);
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss, not_taken);
- // Bail out if we have a JS global proxy object.
- __ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
- __ cmp(r0, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, miss_label, not_taken);
+ // Check that the receiver is a valid JS object.
+ __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r0, FIRST_JS_OBJECT_TYPE);
+ __ j(below, miss, not_taken);
- // Possible work-around for http://crbug.com/16276.
- __ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, miss_label, not_taken);
- __ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, miss_label, not_taken);
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- // Load properties array.
+ GenerateGlobalInstanceTypeCheck(masm, r0, miss);
+
+ // Check for non-global object that requires access check.
+ __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
+ (1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor));
+ __ j(not_zero, miss, not_taken);
+
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ CheckMap(r0, Factory::hash_table_map(), miss, true);
+}
- // Check that the properties array is a dictionary.
- if (check_dictionary == CHECK_DICTIONARY) {
- __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(not_equal, miss_label);
- }
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ mov(r2, FieldOperand(r0, kCapacityOffset));
- __ shr(r2, kSmiTagSize); // convert smi to int
- __ dec(r2);
+ __ mov(r1, FieldOperand(elements, kCapacityOffset));
+ __ shr(r1, kSmiTagSize); // convert smi to int
+ __ dec(r1);
// Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes
@@ -121,37 +126,147 @@
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r1, String::kHashShift);
+ __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+ __ shr(r0, String::kHashShift);
if (i > 0) {
- __ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r1, Operand(r2));
+ __ and_(r0, Operand(r1));
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
// Check if the key is identical to the name.
- __ cmp(name,
- Operand(r0, r1, times_4, kElementsStartOffset - kHeapObjectTag));
+ __ cmp(name, Operand(elements, r0, times_4,
+ kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
- __ j(equal, &done, taken);
+ __ j(equal, done, taken);
} else {
- __ j(not_equal, miss_label, not_taken);
+ __ j(not_equal, miss, not_taken);
}
}
+}
- // Check that the value is a normal property.
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not a symbol, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - used for the index into the property dictionary
+ //
+ // r1 - used to hold the capacity of the property dictionary.
+ //
+ // result - holds the result on exit.
+
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
__ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(r0, r1, times_4, kDetailsOffset - kHeapObjectTag),
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ j(not_zero, miss_label, not_taken);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not a symbol, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register value,
+ Register r0,
+ Register r1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // r0 - used for index into the property dictionary and is clobbered.
+ //
+ // r1 - used to hold the capacity of the property dictionary and is clobbered.
+ Label done;
+
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r0 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+ Immediate(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label, not_taken);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+ __ mov(Operand(r0, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ mov(r1, value);
+ __ RecordWrite(elements, r0, r1);
}
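
GenerateStringDictionaryProbes above emits an unrolled quadratic probe over the StringDictionary: the index for probe i is (hash + i + i*i) & mask, and each entry occupies three words (key, value, details), which is why the loads and stores address the value and details slots at fixed offsets from the probed index. A small C++ sketch of the same lookup over a flat table (the Entry struct, the zero-key "empty slot" convention and main are assumptions for illustration; real V8 probes a FixedArray of tagged words and unrolls only a few probes before falling back):

#include <cstdint>
#include <cstdio>
#include <vector>

// Each dictionary entry is three words: key, value, details.
struct Entry { uint32_t key; uint32_t value; uint32_t details; };

const Entry* Probe(const std::vector<Entry>& dict, uint32_t key,
                   uint32_t hash) {
  uint32_t mask = static_cast<uint32_t>(dict.size()) - 1;  // capacity is 2^n
  for (uint32_t i = 0; ; ++i) {
    uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
    const Entry& e = dict[index];
    if (e.key == key) return &e;     // hit: caller still checks details
    if (e.key == 0) return nullptr;  // empty slot: miss (simplification)
  }
}

int main() {
  std::vector<Entry> dict(8);                  // zero-initialized entries
  dict[(42 + 0) & 7] = {42, 7, 0};             // key found on the first probe
  const Entry* e = Probe(dict, 42, 42);        // hash passed in directly here
  std::printf("%u\n", e ? e->value : 0u);      // prints 7
  return 0;
}
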
@@ -307,6 +422,7 @@
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register map,
+ int interceptor_bit,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
@@ -322,7 +438,7 @@
// Check bit field.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
- KeyedLoadIC::kSlowCaseBitFieldMask);
+ (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
__ j(not_zero, slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -432,8 +548,6 @@
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
- GenerateKeyedLoadReceiverCheck(masm, edx, ecx, &slow);
-
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
@@ -441,6 +555,9 @@
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
+
GenerateFastArrayLoad(masm,
edx,
eax,
@@ -503,6 +620,9 @@
__ bind(&check_string);
GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, ecx, Map::kHasNamedInterceptor, &slow);
+
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
@@ -555,15 +675,12 @@
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
- GenerateDictionaryLoad(masm,
- &slow,
- edx,
- eax,
- ebx,
- ecx,
- edi,
- eax,
- DICTIONARY_CHECK_DONE);
+
+ __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
+
+ GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
@@ -1173,24 +1290,18 @@
}
-static void GenerateNormalHelper(MacroAssembler* masm,
- int argc,
- bool is_global_object,
- Label* miss) {
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
- // -- edx : receiver
+ // -- edi : function
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // Search dictionary - put result in register edi.
- __ mov(edi, edx);
- GenerateDictionaryLoad(
- masm, miss, edx, ecx, eax, edi, ebx, edi, CHECK_DICTIONARY);
-
// Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
@@ -1199,12 +1310,6 @@
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, miss, not_taken);
- // Patch the receiver on stack with the global proxy if necessary.
- if (is_global_object) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
@@ -1219,55 +1324,17 @@
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Label miss, global_object, non_global_object;
+ Label miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
- // Check that the receiver is a valid JS object.
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(eax, FIRST_JS_OBJECT_TYPE);
- __ j(below, &miss, not_taken);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object.
- __ cmp(eax, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global_object);
- __ cmp(eax, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &non_global_object);
-
- // Accessing global object: Load and invoke.
- __ bind(&global_object);
- // Check that the global object does not require access checks.
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_equal, &miss, not_taken);
- GenerateNormalHelper(masm, argc, true, &miss);
-
- // Accessing non-global object: Check for access to global proxy.
- Label global_proxy, invoke;
- __ bind(&non_global_object);
- __ cmp(eax, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, &global_proxy, not_taken);
- // Check that the non-global, non-global-proxy object does not
- // require access checks.
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_equal, &miss, not_taken);
- __ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss);
-
- // Global object proxy access: Check access rights.
- __ bind(&global_proxy);
- __ CheckAccessGlobalProxy(edx, eax, &miss);
- __ jmp(&invoke);
+ // eax: elements
+ // Search the dictionary placing the result in edi.
+ GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
+ GenerateFunctionTailCall(masm, argc, &miss);
__ bind(&miss);
}
@@ -1282,6 +1349,12 @@
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(&Counters::call_miss, 1);
+ } else {
+ __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ }
+
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -1303,25 +1376,28 @@
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &invoke, not_taken);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &invoke, not_taken);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
+ __ j(equal, &global);
+ __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
+ __ j(not_equal, &invoke);
- // Patch the receiver on the stack.
- __ bind(&global);
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+ __ bind(&invoke);
+ }
// Invoke the function.
ParameterCount actual(argc);
- __ bind(&invoke);
__ InvokeFunction(edi, actual, JUMP_FUNCTION);
}
@@ -1393,7 +1469,8 @@
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call);
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad(
masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
@@ -1403,15 +1480,7 @@
// receiver in edx is not used after this point.
// ecx: key
// edi: function
-
- // Check that the value in edi is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow_call, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, &slow_call, not_taken);
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+ GenerateFunctionTailCall(masm, argc, &slow_call);
__ bind(&check_number_dictionary);
// eax: elements
@@ -1451,15 +1520,13 @@
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(masm, edx, eax, &lookup_monomorphic_cache);
+ GenerateKeyedLoadReceiverCheck(
+ masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(not_equal, &lookup_monomorphic_cache, not_taken);
+ __ CheckMap(ebx, Factory::hash_table_map(), &lookup_monomorphic_cache, true);
- GenerateDictionaryLoad(
- masm, &slow_load, edx, ecx, ebx, eax, edi, edi, DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call);
@@ -1539,49 +1606,15 @@
// -- ecx : name
// -- esp[0] : return address
// -----------------------------------
- Label miss, probe, global;
+ Label miss;
- // Check that the receiver isn't a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
- // Check that the receiver is a valid JS object.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(edx, FIRST_JS_OBJECT_TYPE);
- __ j(less, &miss, not_taken);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object (unlikely).
- __ cmp(edx, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, &global, not_taken);
-
- // Check for non-global object that requires access check.
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &miss, not_taken);
-
+ // edx: elements
// Search the dictionary placing the result in eax.
- __ bind(&probe);
- GenerateDictionaryLoad(masm,
- &miss,
- eax,
- ecx,
- edx,
- edi,
- ebx,
- edi,
- CHECK_DICTIONARY);
- __ mov(eax, edi);
+ GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
__ ret(0);
- // Global object access: Check access rights.
- __ bind(&global);
- __ CheckAccessGlobalProxy(eax, edx, &miss);
- __ jmp(&probe);
-
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -1595,6 +1628,8 @@
// -- esp[0] : return address
// -----------------------------------
+ __ IncrementCounter(&Counters::load_miss, 1);
+
__ pop(ebx);
__ push(eax); // receiver
__ push(ecx); // name
@@ -1711,6 +1746,8 @@
// -- esp[0] : return address
// -----------------------------------
+ __ IncrementCounter(&Counters::keyed_load_miss, 1);
+
__ pop(ebx);
__ push(edx); // receiver
__ push(eax); // name
@@ -1829,6 +1866,36 @@
}
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Label miss, restore_miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
+
+ // A lot of registers are needed for storing to slow case
+ // objects. Push and restore receiver but rely on
+ // GenerateDictionaryStore preserving the value and name.
+ __ push(edx);
+ GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
+ __ Drop(1);
+ __ IncrementCounter(&Counters::store_normal_hit, 1);
+ __ ret(0);
+
+ __ bind(&restore_miss);
+ __ pop(edx);
+
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::store_normal_miss, 1);
+ GenerateMiss(masm);
+}
+
+
// Defined in ic.cc.
Object* KeyedStoreIC_Miss(Arguments args);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b83f9bc..b3f7c21 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -98,11 +98,6 @@
}
-// For page containing |object| mark region covering [object+offset] dirty.
-// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
-// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object, int offset,
Register value, Register scratch) {
// The compiled code assumes that record write doesn't change the
@@ -153,6 +148,39 @@
}
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are esi.
+ ASSERT(!object.is(esi) && !value.is(esi) && !address.is(esi));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+
+ InNewSpace(object, value, equal, &done);
+
+ RecordWriteHelper(object, address, value);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(address, Immediate(BitCast<int32_t>(kZapValue)));
+ mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
cmp(esp,
Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
@@ -514,97 +542,6 @@
}
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss) {
- // Make sure there's no overlap between scratch and the other
- // registers.
- ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- mov(Operand(esp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- while (object != holder) {
- depth++;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- JSObject* prototype = JSObject::cast(object->GetPrototype());
- if (Heap::InNewSpace(prototype)) {
- // Get the map of the current object.
- mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
- // Branch on the result of the map check.
- j(not_equal, miss, not_taken);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
-
- // Restore scratch register to be the map of the object.
- // We load the prototype from the map in the scratch register.
- mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- }
- // The prototype is in new space; we cannot store a reference
- // to it in the code. Load it from the map.
- reg = holder_reg; // from now the object is in holder_reg
- mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
- } else {
- // Check the map of the current object.
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- // Branch on the result of the map check.
- j(not_equal, miss, not_taken);
- // Check access rights to the global object. This has to happen
- // after the map check so that we know that the object is
- // actually a global object.
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- // The prototype is in old space; load it directly.
- reg = holder_reg; // from now the object is in holder_reg
- mov(reg, Handle<JSObject>(prototype));
- }
-
- if (save_at_depth == depth) {
- mov(Operand(esp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- object = prototype;
- }
-
- // Check the holder map.
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(holder->map())));
- j(not_equal, miss, not_taken);
-
- // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth + 1));
-
- // Perform security check for access to the global object and return
- // the holder register.
- ASSERT(object == holder);
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
- if (object->IsJSGlobalProxy()) {
- CheckAccessGlobalProxy(reg, scratch, miss);
- }
- return reg;
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2018721..02cfd4d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -73,16 +73,27 @@
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
- // For page containing |object| mark region covering [object+offset] dirty.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as a Smi.
- // All registers are clobbered by the operation.
+ // For page containing |object| mark region covering [object+offset]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. If offset is zero, then the scratch register
+ // contains the array index into the elements array represented as a
+ // Smi. All registers are clobbered by the operation. RecordWrite
+ // filters out smis so it does not update the write barrier if the
+ // value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
+ // For page containing |object| mark region covering |address|
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. All registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update the
+ // write barrier if the value is a smi.
+ void RecordWrite(Register object,
+ Register address,
+ Register value);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@@ -233,24 +244,6 @@
// ---------------------------------------------------------------------------
// Inline caching support
- // Generates code that verifies that the maps of objects in the
- // prototype chain of object hasn't changed since the code was
- // generated and branches to the miss label if any map has. If
- // necessary the function also generates code for security check
- // in case of global object holders. The scratch and holder
- // registers are always clobbered, but the object register is only
- // clobbered if it the same as the holder register. The function
- // returns a register containing the holder - either object_reg or
- // holder_reg.
- // The function can optionally (when save_at_depth !=
- // kInvalidProtoDepth) save the object at the given depth by moving
- // it to [esp + kPointerSize].
- Register CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
- Register scratch,
- int save_at_depth,
- Label* miss);
-
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index bab0435..26361d1 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -101,6 +101,110 @@
}
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register r0,
+ Register extra) {
+ ASSERT(name->IsSymbol());
+ __ IncrementCounter(&Counters::negative_lookups, 1);
+ __ IncrementCounter(&Counters::negative_lookups_miss, 1);
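+ // The miss counter is decremented again below when the negative lookup succeeds.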
+
+ Label done;
+ __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ __ test(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(kInterceptorOrAccessCheckNeededMask));
+ __ j(not_zero, miss_label, not_taken);
+
+ __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ j(below, miss_label, not_taken);
+
+ // Load properties array.
+ Register properties = r0;
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+ // Check that the properties array is a dictionary.
+ __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+ // If the names of the slots probed at offsets 1 to kProbes - 1 from the
+ // hash value are not equal to the name, and the kProbes-th slot is unused
+ // (its name is the undefined value), the hash table is guaranteed not to
+ // contain the property. This holds even if some slots represent deleted
+ // properties (their names are the null value).
+ for (int i = 0; i < kProbes; i++) {
+ // r0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
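+ // Without a spare |extra| register, temporarily reuse the receiver register
+ // for the index; it is restored from the stack after the probe.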
+ if (extra.is(no_reg)) {
+ __ push(receiver);
+ }
+ Register index = extra.is(no_reg) ? receiver : extra;
+ // Capacity is smi 2^n.
+ __ mov(index, FieldOperand(properties, kCapacityOffset));
+ __ dec(index);
+ __ and_(Operand(index),
+ Immediate(Smi::FromInt(name->Hash() +
+ StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+
+ Register entity_name = extra.is(no_reg) ? properties : extra;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ __ cmp(entity_name, Factory::undefined_value());
+ if (extra.is(no_reg)) {
+ // Restore 'receiver', which was used as the 'index' register.
+ __ pop(receiver);
+ }
+ if (i != kProbes - 1) {
+ __ j(equal, &done, taken);
+
+ // Stop and go to the miss label if we found the property.
+ __ cmp(entity_name, Handle<String>(name));
+ __ j(equal, miss_label, not_taken);
+
+ if (extra.is(no_reg)) {
+ // Restore the properties if their register was occupied by the name.
+ __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ }
+ } else {
+ // Give up probing if still not found the undefined value.
+ __ j(not_equal, miss_label, not_taken);
+ }
+ }
+
+ __ bind(&done);
+ __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+}
+
+
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -723,6 +827,33 @@
}
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static Object* GenerateCheckPropertyCells(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ JSObject* current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
+ Object* cell = GenerateCheckPropertyCell(masm,
+ GlobalObject::cast(current),
+ name,
+ scratch,
+ miss);
+ if (cell->IsFailure()) {
+ return cell;
+ }
+ }
+ ASSERT(current->IsJSObject());
+ current = JSObject::cast(current->GetPrototype());
+ }
+ return NULL;
+}
+
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -733,33 +864,129 @@
Register holder_reg,
Register scratch,
String* name,
- int push_at_depth,
- Label* miss) {
- // Check that the maps haven't changed.
- Register result =
- masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
- push_at_depth, miss);
+ int save_at_depth,
+ Label* miss,
+ Register extra) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+ ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch));
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ JSObject* current = object;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ mov(Operand(esp, kPointerSize), reg);
+ }
+
+ // Traverse the prototype chain and check the maps in the prototype chain for
+ // fast and global objects or do negative lookup for normal objects.
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ ASSERT(current->GetPrototype()->IsJSObject());
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ Object* lookup_result = Heap::LookupSymbol(name);
+ if (lookup_result->IsFailure()) {
+ set_failure(Failure::cast(lookup_result));
+ return reg;
+ } else {
+ name = String::cast(lookup_result);
+ }
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch,
+ extra);
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // from now the object is in holder_reg
+ __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ } else if (Heap::InNewSpace(prototype)) {
+ // Get the map of the current object.
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ cmp(Operand(scratch), Immediate(Handle<Map>(current->map())));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss, not_taken);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch, miss);
+
+ // Restore scratch register to be the map of the object.
+ // We load the prototype from the map in the scratch register.
+ __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+ } else {
+ // Check the map of the current object.
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(current->map())));
+ // Branch on the result of the map check.
+ __ j(not_equal, miss, not_taken);
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch, miss);
+ }
+ // The prototype is in old space; load it directly.
+ reg = holder_reg; // from now the object is in holder_reg
+ __ mov(reg, Handle<JSObject>(prototype));
+ }
+
+ if (save_at_depth == depth) {
+ __ mov(Operand(esp, kPointerSize), reg);
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+ ASSERT(current == holder);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth + 1));
+
+ // Check the holder map.
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(holder->map())));
+ __ j(not_equal, miss, not_taken);
+
+ // Perform security check for access to the global object.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ if (holder->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch, miss);
+ }
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed. We also need to check that the
// property cell for the property is still empty.
- while (object != holder) {
- if (object->IsGlobalObject()) {
- Object* cell = GenerateCheckPropertyCell(masm(),
- GlobalObject::cast(object),
- name,
- scratch,
- miss);
- if (cell->IsFailure()) {
- set_failure(Failure::cast(cell));
- return result;
- }
- }
- object = JSObject::cast(object->GetPrototype());
- }
+ Object* result = GenerateCheckPropertyCells(masm(),
+ object,
+ holder,
+ name,
+ scratch,
+ miss);
+ if (result->IsFailure()) set_failure(Failure::cast(result));
// Return the register containing the holder.
- return result;
+ return reg;
}
@@ -1083,7 +1310,8 @@
__ j(zero, &miss, not_taken);
// Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+ Register reg = CheckPrototypes(object, edx, holder, ebx, eax,
+ name, &miss, edi);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -1145,7 +1373,7 @@
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss);
+ eax, name, &miss, edi);
if (argc == 0) {
// Noop, return the length.
@@ -1291,7 +1519,7 @@
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
- eax, name, &miss);
+ eax, name, &miss, edi);
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1366,7 +1594,7 @@
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
Register receiver = ebx;
Register index = edi;
@@ -1431,7 +1659,7 @@
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
Register receiver = eax;
Register index = edi;
@@ -1536,7 +1764,7 @@
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, depth, &miss);
+ ebx, eax, name, depth, &miss, edi);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1559,7 +1787,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
}
break;
@@ -1579,7 +1807,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::NUMBER_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
}
break;
}
@@ -1600,7 +1828,7 @@
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
- ebx, edx, name, &miss);
+ ebx, edx, name, &miss, edi);
}
break;
}
@@ -1722,7 +1950,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+ CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -1993,6 +2221,8 @@
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
+ ASSERT(last->IsGlobalObject() || last->HasFastProperties());
+
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
@@ -2140,7 +2370,7 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
+ CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi);
// Get the value from the cell.
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 131f77b..70bbaf8 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -80,11 +80,38 @@
}
-Map* IC::GetCodeCacheMapForObject(Object* object) {
- if (object->IsJSObject()) return JSObject::cast(object)->map();
+InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
+ JSObject* holder) {
+ if (object->IsJSObject()) {
+ return GetCodeCacheForObject(JSObject::cast(object), holder);
+ }
// If the object is a value, we use the prototype map for the cache.
ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
- return JSObject::cast(object->GetPrototype())->map();
+ return PROTOTYPE_MAP;
+}
+
+
+InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
+ JSObject* holder) {
+ // Fast-properties and global objects store stubs in their own maps.
+ // Slow-properties objects use the prototype's map, unless the property is
+ // the object's own (that is, when holder == object). This works because
+ // slow-properties objects that have the same prototype (or a prototype
+ // with the same map) and do not have the property are interchangeable for
+ // such a stub.
+ if (holder != object &&
+ !object->HasFastProperties() &&
+ !object->IsJSGlobalProxy() &&
+ !object->IsJSGlobalObject()) {
+ return PROTOTYPE_MAP;
+ }
+ return OWN_MAP;
+}
+
+
+Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
+ Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
+ ASSERT(map_owner->IsJSObject());
+ return JSObject::cast(map_owner)->map();
}
diff --git a/src/ic.cc b/src/ic.cc
index 475f161..cdb06ac 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -134,13 +134,45 @@
}
#endif
+
+static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
+ Object* receiver) {
+ Object* end = lookup->IsProperty() ? lookup->holder() : Heap::null_value();
+ for (Object* current = receiver;
+ current != end;
+ current = current->GetPrototype()) {
+ if (current->IsJSObject() &&
+ !JSObject::cast(current)->HasFastProperties() &&
+ !current->IsJSGlobalProxy() &&
+ !current->IsJSGlobalObject()) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
- Map* map = GetCodeCacheMapForObject(receiver);
+ InlineCacheHolderFlag cache_holder =
+ Code::ExtractCacheHolderFromFlags(target->flags());
+
+ if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
+ // The stub was generated for JSObject but called for non-JSObject.
+ // IC::GetCodeCacheMap is not applicable.
+ return MONOMORPHIC;
+ } else if (cache_holder == PROTOTYPE_MAP &&
+ receiver->GetPrototype()->IsNull()) {
+ // IC::GetCodeCacheMap is not applicable.
+ return MONOMORPHIC;
+ }
+ Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@@ -487,12 +519,24 @@
void CallICBase::UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name) {
+ State state,
+ Handle<Object> object,
+ Handle<String> name) {
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
+#ifndef V8_TARGET_ARCH_IA32
+ // Normal objects are so far only implemented for IA32.
+ if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+#else
+ if (lookup->holder() != *object &&
+ HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
+ // Suppress optimization for prototype chains with slow properties objects
+ // in the middle.
+ return;
+ }
+#endif
+
// Compute the number of arguments.
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
@@ -590,8 +634,13 @@
state == MONOMORPHIC_PROTOTYPE_FAILURE) {
set_target(Code::cast(code));
} else if (state == MEGAMORPHIC) {
+ // Cache code holding map should be consistent with
+ // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
+ Map* map = JSObject::cast(object->IsJSObject() ? *object :
+ object->GetPrototype())->map();
+
// Update the stub cache.
- StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
+ StubCache::Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@@ -795,6 +844,8 @@
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+
// Compute the code stub for this load.
Object* code = NULL;
if (state == UNINITIALIZED) {
@@ -836,7 +887,7 @@
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
- code = StubCache::ComputeLoadNormal(*name, *receiver);
+ code = StubCache::ComputeLoadNormal();
}
break;
}
@@ -871,8 +922,12 @@
} else if (state == MONOMORPHIC) {
set_target(megamorphic_stub());
} else if (state == MEGAMORPHIC) {
- // Update the stub cache.
- StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
+ // Cache code holding map should be consistent with
+ // GenerateMonomorphicCacheProbe.
+ Map* map = JSObject::cast(object->IsJSObject() ? *object :
+ object->GetPrototype())->map();
+
+ StubCache::Set(*name, map, Code::cast(code));
}
#ifdef DEBUG
@@ -992,12 +1047,14 @@
}
}
set_target(stub);
- // For JSObjects that are not value wrappers and that do not have
- // indexed interceptors, we initialize the inlined fast case (if
- // present) by patching the inlined map check.
+ // For JSObjects with fast elements that are not value wrappers
+ // and that do not have indexed interceptors, we initialize the
+ // inlined fast case (if present) by patching the inlined map
+ // check.
if (object->IsJSObject() &&
!object->IsJSValue() &&
- !JSObject::cast(*object)->HasIndexedInterceptor()) {
+ !JSObject::cast(*object)->HasIndexedInterceptor() &&
+ JSObject::cast(*object)->HasFastElements()) {
Map* map = JSObject::cast(*object)->map();
PatchInlinedLoad(address(), map);
}
@@ -1016,6 +1073,8 @@
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+
// Compute the code stub for this load.
Object* code = NULL;
@@ -1196,16 +1255,18 @@
break;
}
case NORMAL: {
- if (!receiver->IsGlobalObject()) {
- return;
+ if (receiver->IsGlobalObject()) {
+ // The stub generated for the global object picks the value directly
+ // from the property cell. So the property must be directly on the
+ // global object.
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ JSGlobalPropertyCell* cell =
+ JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+ code = StubCache::ComputeStoreGlobal(*name, *global, cell);
+ } else {
+ if (lookup->holder() != *receiver) return;
+ code = StubCache::ComputeStoreNormal();
}
- // The stub generated for the global object picks the value directly
- // from the property cell. So the property must be directly on the
- // global object.
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
- JSGlobalPropertyCell* cell =
- JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
- code = StubCache::ComputeStoreGlobal(*name, *global, cell);
break;
}
case CALLBACKS: {
diff --git a/src/ic.h b/src/ic.h
index 5fd5078..0d5df96 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -33,10 +33,6 @@
namespace v8 {
namespace internal {
-// Flag indicating whether an IC stub needs to check that a backing
-// store is in dictionary case.
-enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
-
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
@@ -121,9 +117,14 @@
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
- // Returns the map to use for caching stubs for a given object.
- // This method should not be called with undefined or null.
- static inline Map* GetCodeCacheMapForObject(Object* object);
+ // Determines which map must be used for keeping the code stub.
+ // These methods should not be called with undefined or null.
+ static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
+ JSObject* holder);
+ static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
+ JSObject* holder);
+ static inline Map* GetCodeCacheMap(Object* object,
+ InlineCacheHolderFlag holder);
protected:
Address fp() const { return fp_; }
@@ -388,6 +389,7 @@
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateArrayLength(MacroAssembler* masm);
+ static void GenerateNormal(MacroAssembler* masm);
private:
// Update the inline cache and the global stub cache based on the
diff --git a/src/json.js b/src/json.js
index 3e42d36..cdb10be 100644
--- a/src/json.js
+++ b/src/json.js
@@ -207,7 +207,7 @@
} else if (IS_STRING_WRAPPER(value)) {
value = $String(value);
} else if (IS_BOOLEAN_WRAPPER(value)) {
- value = $Boolean(value);
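+ // Unwrap the Boolean wrapper object to its primitive value.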
+ value = %_ValueOf(value);
}
}
switch (typeof value) {
@@ -241,7 +241,7 @@
}
var gap;
if (IS_NUMBER(space)) {
- space = $Math.min(space, 10);
+ space = $Math.min(ToInteger(space), 10);
gap = "";
for (var i = 0; i < space; i++) {
gap += " ";
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 3e9c5ea..9f98782 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -356,7 +356,16 @@
if (!subject->IsFlat()) {
FlattenString(subject);
}
- bool is_ascii = subject->IsAsciiRepresentation();
+ // Check the asciiness of the underlying storage.
+ bool is_ascii;
+ {
+ AssertNoAllocation no_gc;
+ String* sequential_string = *subject;
+ if (subject->IsConsString()) {
+ sequential_string = ConsString::cast(*subject)->first();
+ }
+ is_ascii = sequential_string->IsAsciiRepresentation();
+ }
if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
return -1;
}
@@ -381,6 +390,11 @@
ASSERT(index <= subject->length());
ASSERT(subject->IsFlat());
+ // A flat ASCII string might have a two-byte first part.
+ if (subject->IsConsString()) {
+ subject = Handle<String>(ConsString::cast(*subject)->first());
+ }
+
#ifndef V8_INTERPRETED_REGEXP
ASSERT(output.length() >=
(IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
@@ -407,7 +421,7 @@
// If result is RETRY, the string has changed representation, and we
// must restart from scratch.
// In this case, it means we must make sure we are prepared to handle
- // the, potentially, differen subject (the string can switch between
+ // the, potentially, different subject (the string can switch between
// being internal and external, and even between being ASCII and UC16,
// but the characters are always the same).
IrregexpPrepare(regexp, subject);
diff --git a/src/jump-target-heavy.h b/src/jump-target-heavy.h
index b923fe5..b2113a5 100644
--- a/src/jump-target-heavy.h
+++ b/src/jump-target-heavy.h
@@ -196,6 +196,8 @@
public:
// Construct a break target.
BreakTarget() {}
+ explicit BreakTarget(JumpTarget::Directionality direction)
+ : JumpTarget(direction) { }
virtual ~BreakTarget() {}
diff --git a/src/jump-target-light-inl.h b/src/jump-target-light-inl.h
index 0b4eee4..e8f1a5f 100644
--- a/src/jump-target-light-inl.h
+++ b/src/jump-target-light-inl.h
@@ -36,16 +36,20 @@
// Construct a jump target.
JumpTarget::JumpTarget(Directionality direction)
: entry_frame_set_(false),
+ direction_(direction),
entry_frame_(kInvalidVirtualFrameInitializer) {
}
JumpTarget::JumpTarget()
: entry_frame_set_(false),
+ direction_(FORWARD_ONLY),
entry_frame_(kInvalidVirtualFrameInitializer) {
}
BreakTarget::BreakTarget() { }
+BreakTarget::BreakTarget(JumpTarget::Directionality direction)
+ : JumpTarget(direction) { }
} } // namespace v8::internal
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 084bd58..91b7266 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -120,6 +120,9 @@
// Has an entry frame been found?
bool entry_frame_set_;
+ // Can we branch backwards to this label?
+ Directionality direction_;
+
// The frame used on entry to the block and expected at backward
// jumps to the block. Set the first time something branches to this
// jump target.
@@ -150,6 +153,7 @@
public:
// Construct a break target.
inline BreakTarget();
+ inline BreakTarget(JumpTarget::Directionality direction);
virtual ~BreakTarget() {}
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 34d5c0d..c8c6f08 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -51,7 +51,8 @@
// Applies the change to the script.
// The change is in form of list of chunks encoded in a single array as
// a series of triplets (pos1_start, pos1_end, pos2_end)
- function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
+ function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
+ change_log) {
var old_source = script.source;
@@ -96,7 +97,7 @@
}
// Recursively collects all newly compiled functions that are going into
- // business and should be have link to the actual script updated.
+ // business and should have link to the actual script updated.
function CollectNew(node_list) {
for (var i = 0; i < node_list.length; i++) {
link_to_original_script_list.push(node_list[i]);
@@ -121,6 +122,20 @@
}
}
+ var preview_description = {
+ change_tree: DescribeChangeTree(root_old_node),
+ textual_diff: {
+ old_len: old_source.length,
+ new_len: new_source.length,
+ chunks: diff_array
+ },
+ updated: false
+ };
+
+ if (preview_only) {
+ return preview_description;
+ }
+
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code need to be patched.
@@ -132,13 +147,15 @@
}
}
- // Check that function being patched is not currently on stack.
- CheckStackActivations(replaced_function_infos, change_log);
-
-
// We haven't changed anything before this line yet.
// Committing all changes.
+ // Check that function being patched is not currently on stack or drop them.
+ var dropped_functions_number =
+ CheckStackActivations(replaced_function_infos, change_log);
+
+ preview_description.stack_modified = dropped_functions_number != 0;
+
// Start with breakpoints. Convert their line/column positions and
// temporary remove.
var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
@@ -166,6 +183,8 @@
LinkToOldScript(link_to_old_script_list[i], old_script,
link_to_old_script_report);
}
+
+ preview_description.created_script_name = old_script_name;
}
// Link to an actual script all the functions that we are going to use.
@@ -189,6 +208,9 @@
}
break_points_restorer(pos_translator, old_script);
+
+ preview_description.updated = true;
+ return preview_description;
}
// Function is public.
this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
@@ -494,6 +516,16 @@
this.new_end_pos = void 0;
this.corresponding_node = void 0;
this.unmatched_new_nodes = void 0;
+
+ // 'Textual' correspondence/matching is weaker than 'pure'
+ // correspondence/matching. We need the 'textual' level for visual
+ // presentation in the UI and use the 'pure' level for actual code
+ // manipulation. Sometimes only a function's body is changed (the functions
+ // in the old and new script textually correspond), but we cannot patch the
+ // code, so we treat them as an old function deleted and a new function
+ // created.
+ this.textual_corresponding_node = void 0;
+ this.textually_unmatched_new_nodes = void 0;
+
this.live_shared_info_wrapper = void 0;
}
@@ -640,6 +672,7 @@
var new_children = new_node.children;
var unmatched_new_nodes_list = [];
+ var textually_unmatched_new_nodes_list = [];
var old_index = 0;
var new_index = 0;
@@ -650,6 +683,7 @@
if (new_children[new_index].info.start_position <
old_children[old_index].new_start_pos) {
unmatched_new_nodes_list.push(new_children[new_index]);
+ textually_unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
} else if (new_children[new_index].info.start_position ==
old_children[old_index].new_start_pos) {
@@ -657,6 +691,8 @@
old_children[old_index].new_end_pos) {
old_children[old_index].corresponding_node =
new_children[new_index];
+ old_children[old_index].textual_corresponding_node =
+ new_children[new_index];
if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
ProcessChildren(old_children[old_index],
new_children[new_index]);
@@ -673,6 +709,7 @@
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
unmatched_new_nodes_list.push(new_children[new_index]);
+ textually_unmatched_new_nodes_list.push(new_children[new_index]);
}
new_index++;
old_index++;
@@ -694,21 +731,28 @@
while (new_index < new_children.length) {
unmatched_new_nodes_list.push(new_children[new_index]);
+ textually_unmatched_new_nodes_list.push(new_children[new_index]);
new_index++;
}
if (old_node.status == FunctionStatus.CHANGED) {
- if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
+ var why_wrong_expectations =
+ WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
+ if (why_wrong_expectations) {
old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = "Changed code expectations";
+ old_node.status_explanation = why_wrong_expectations;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
+ old_node.textually_unmatched_new_nodes =
+ textually_unmatched_new_nodes_list;
}
ProcessChildren(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
+ old_code_tree.textual_corresponding_node = new_code_tree;
+
Assert(old_code_tree.status != FunctionStatus.DAMAGED,
"Script became damaged");
}
@@ -792,27 +836,37 @@
}
// Compares a function interface old and new version, whether it
- // changed or not.
- function CompareFunctionExpectations(function_info1, function_info2) {
+ // changed or not. Returns explanation if they differ.
+ function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
// Check that function has the same number of parameters (there may exist
// an adapter, that won't survive function parameter number change).
if (function_info1.param_num != function_info2.param_num) {
- return false;
+ return "Changed parameter number: " + function_info1.param_num +
+ " and " + function_info2.param_num;
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
-
- if (!scope_info1) {
- return !scope_info2;
+
+ var scope_info1_text;
+ var scope_info2_text;
+
+ if (scope_info1) {
+ scope_info1_text = scope_info1.toString();
+ } else {
+ scope_info1_text = "";
}
-
- if (scope_info1.length != scope_info2.length) {
- return false;
+ if (scope_info2) {
+ scope_info2_text = scope_info2.toString();
+ } else {
+ scope_info2_text = "";
}
-
- // Check that outer scope structure is not changed. Otherwise the function
- // will not properly work with existing scopes.
- return scope_info1.toString() == scope_info2.toString();
+
+ if (scope_info1_text != scope_info2_text) {
+ return "Incompatible variable maps: [" + scope_info1_text +
+ "] and [" + scope_info2_text + "]";
+ }
+ // No differences. Return undefined.
+ return;
}
// Minifier forward declaration.
@@ -856,6 +910,8 @@
change_log.push( { functions_on_stack: problems } );
throw new Failure("Blocked by functions on stack");
}
+
+ return dropped.length;
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
@@ -897,14 +953,11 @@
this.GetPcFromSourcePos = GetPcFromSourcePos;
// LiveEdit main entry point: changes a script text to a new string.
- function SetScriptSource(script, new_source, change_log) {
+ function SetScriptSource(script, new_source, preview_only, change_log) {
var old_source = script.source;
var diff = CompareStringsLinewise(old_source, new_source);
- if (diff.length == 0) {
- change_log.push( { empty_diff: true } );
- return;
- }
- ApplyPatchMultiChunk(script, diff, new_source, change_log);
+ return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
+ change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;
@@ -931,7 +984,67 @@
return ApplyPatchMultiChunk(script,
[ change_pos, change_pos + change_len, change_pos + new_str.length],
- new_source, change_log);
+ new_source, false, change_log);
+ }
+
+ // Creates JSON description for a change tree.
+ function DescribeChangeTree(old_code_tree) {
+
+ function ProcessOldNode(node) {
+ var child_infos = [];
+ for (var i = 0; i < node.children.length; i++) {
+ var child = node.children[i];
+ if (child.status != FunctionStatus.UNCHANGED) {
+ child_infos.push(ProcessOldNode(child));
+ }
+ }
+ var new_child_infos = [];
+ if (node.textually_unmatched_new_nodes) {
+ for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
+ var child = node.textually_unmatched_new_nodes[i];
+ new_child_infos.push(ProcessNewNode(child));
+ }
+ }
+ var res = {
+ name: node.info.function_name,
+ positions: DescribePositions(node),
+ status: node.status,
+ children: child_infos,
+ new_children: new_child_infos
+ };
+ if (node.status_explanation) {
+ res.status_explanation = node.status_explanation;
+ }
+ if (node.textual_corresponding_node) {
+ res.new_positions = DescribePositions(node.textual_corresponding_node);
+ }
+ return res;
+ }
+
+ function ProcessNewNode(node) {
+ var child_infos = [];
+ // Do not list ancestors.
+ if (false) {
+ for (var i = 0; i < node.children.length; i++) {
+ child_infos.push(ProcessNewNode(node.children[i]));
+ }
+ }
+ var res = {
+ name: node.info.function_name,
+ positions: DescribePositions(node),
+ children: child_infos,
+ };
+ return res;
+ }
+
+ function DescribePositions(node) {
+ return {
+ start_position: node.info.start_position,
+ end_position: node.info.end_position
+ };
+ }
+
+ return ProcessOldNode(old_code_tree);
}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 950f8e0..04631a3 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1187,7 +1187,12 @@
// Returns error message or NULL.
static const char* DropFrames(Vector<StackFrame*> frames,
int top_frame_index,
- int bottom_js_frame_index) {
+ int bottom_js_frame_index,
+ Debug::FrameDropMode* mode) {
+ if (Debug::kFrameDropperFrameSize < 0) {
+ return "Stack manipulations are not supported in this architecture.";
+ }
+
StackFrame* pre_top_frame = frames[top_frame_index - 1];
StackFrame* top_frame = frames[top_frame_index];
StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
@@ -1198,12 +1203,18 @@
if (pre_top_frame->code()->is_inline_cache_stub() &&
pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
+ *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
+ } else if (pre_top_frame->code() == Debug::debug_break_slot()) {
+ // OK, we can drop debug break slot.
+ *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame->code() ==
Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
// OK, we can drop our own code.
+ *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else if (pre_top_frame->code()->kind() == Code::STUB &&
pre_top_frame->code()->major_key()) {
- // Unit Test entry, it's fine, we support this case.
+ // Entry from our unit tests, it's fine, we support this case.
+ *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
} else {
return "Unknown structure of stack above changing function";
}
@@ -1316,8 +1327,9 @@
return NULL;
}
+ Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
const char* error_message = DropFrames(frames, top_frame_index,
- bottom_js_frame_index);
+ bottom_js_frame_index, &drop_mode);
if (error_message != NULL) {
return error_message;
@@ -1331,7 +1343,7 @@
break;
}
}
- Debug::FramesHaveBeenDropped(new_id);
+ Debug::FramesHaveBeenDropped(new_id, drop_mode);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
diff --git a/src/log.cc b/src/log.cc
index ada73cb..e083f01 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -309,10 +309,10 @@
void Profiler::Run() {
TickSample sample;
- bool overflow = Logger::profiler_->Remove(&sample);
+ bool overflow = Remove(&sample);
while (running_) {
LOG(TickEvent(&sample, overflow));
- overflow = Logger::profiler_->Remove(&sample);
+ overflow = Remove(&sample);
}
}
@@ -1150,7 +1150,7 @@
int Logger::GetActiveProfilerModules() {
int result = PROFILER_MODULE_NONE;
- if (!profiler_->paused()) {
+ if (profiler_ != NULL && !profiler_->paused()) {
result |= PROFILER_MODULE_CPU;
}
if (FLAG_log_gc) {
@@ -1162,7 +1162,7 @@
void Logger::PauseProfiler(int flags, int tag) {
if (!Log::IsEnabled()) return;
- if (flags & PROFILER_MODULE_CPU) {
+ if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
// It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) {
profiler_->pause();
@@ -1193,7 +1193,7 @@
if (tag != 0) {
UncheckedIntEvent("open-tag", tag);
}
- if (flags & PROFILER_MODULE_CPU) {
+ if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
if (cpu_profiler_nesting_++ == 0) {
++logging_nesting_;
if (FLAG_prof_lazy) {
diff --git a/src/macros.py b/src/macros.py
index 7d97918..32c9651 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -120,6 +120,7 @@
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
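+# Like TO_INTEGER, but additionally maps -0 to +0 (via %NumberToIntegerMapMinusZero).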
+macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
diff --git a/src/messages.js b/src/messages.js
index a46af4a..7bac3b2 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -196,6 +196,7 @@
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",
array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
+ object_not_extensible: "Can't add property %0, object is not extensible",
illegal_access: "illegal access"
};
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index b60e54d..0b5ff99 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -539,6 +539,9 @@
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
+ ASSERT(map()->has_fast_elements() ==
+ (elements()->map() == Heap::fixed_array_map()));
+ ASSERT(map()->has_fast_elements() == HasFastElements());
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index d6571bf..79f2c97 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1166,6 +1166,8 @@
void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
+ ASSERT(map()->has_fast_elements() ==
+ (value->map() == Heap::fixed_array_map()));
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@@ -1181,11 +1183,21 @@
void JSObject::initialize_elements() {
+ ASSERT(map()->has_fast_elements());
ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
}
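+// Switch the object to a fast-elements map (copying the current map if
+// necessary) and install the empty fixed array as the backing store.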
+Object* JSObject::ResetElements() {
+ Object* obj = map()->GetFastElementsMap();
+ if (obj->IsFailure()) return obj;
+ set_map(Map::cast(obj));
+ initialize_elements();
+ return this;
+}
+
+
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -1323,6 +1335,21 @@
}
+bool JSObject::HasFastProperties() {
+ return !properties()->IsDictionary();
+}
+
+
+int JSObject::MaxFastProperties() {
+ // Allow extra fast properties if the object has more than
+ // kMaxFastProperties in-object properties. When this is the case,
+ // it is very unlikely that the object is being used as a dictionary
+ // and there is a good chance that allowing more map transitions
+ // will be worth it.
+ return Max(map()->inobject_properties(), kMaxFastProperties);
+}
+
+
void Struct::InitializeBody(int object_size) {
Object* value = Heap::undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@@ -1331,11 +1358,6 @@
}
-bool JSObject::HasFastProperties() {
- return !properties()->IsDictionary();
-}
-
-
bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
@@ -2177,6 +2199,20 @@
}
+void Map::set_is_extensible(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kIsExtensible));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
+ }
+}
+
+bool Map::is_extensible() {
+ return ((1 << kIsExtensible) & bit_field2()) != 0;
+}
+
+
Code::Flags Code::flags() {
return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
}
@@ -2251,13 +2287,15 @@
InLoopFlag in_loop,
InlineCacheState ic_state,
PropertyType type,
- int argc) {
+ int argc,
+ InlineCacheHolderFlag holder) {
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= argc << kFlagsArgumentsCountShift;
+ if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
// Cast to flags and validate result before returning it.
Flags result = static_cast<Flags>(bits);
ASSERT(ExtractKindFromFlags(result) == kind);
@@ -2271,9 +2309,10 @@
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
PropertyType type,
+ InlineCacheHolderFlag holder,
InLoopFlag in_loop,
int argc) {
- return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
+ return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc, holder);
}
@@ -2306,6 +2345,12 @@
}
+InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
+ int bits = (flags & kFlagsCacheInPrototypeMapMask);
+ return bits != 0 ? PROTOTYPE_MAP : OWN_MAP;
+}
+
+
Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
int bits = flags & ~kFlagsTypeMask;
return static_cast<Flags>(bits);
@@ -2335,6 +2380,26 @@
}
+Object* Map::GetFastElementsMap() {
+ if (has_fast_elements()) return this;
+ Object* obj = CopyDropTransitions();
+ if (obj->IsFailure()) return obj;
+ Map* new_map = Map::cast(obj);
+ new_map->set_has_fast_elements(true);
+ return new_map;
+}
+
+
+Object* Map::GetSlowElementsMap() {
+ if (!has_fast_elements()) return this;
+ Object* obj = CopyDropTransitions();
+ if (obj->IsFailure()) return obj;
+ Map* new_map = Map::cast(obj);
+ new_map->set_has_fast_elements(false);
+ return new_map;
+}
+
+
ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset)
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
@@ -2838,11 +2903,14 @@
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
+ ASSERT(map()->has_fast_elements());
return FAST_ELEMENTS;
}
ASSERT(array->IsDictionary());
+ ASSERT(!map()->has_fast_elements());
return DICTIONARY_ELEMENTS;
}
+ ASSERT(!map()->has_fast_elements());
if (array->IsExternalArray()) {
switch (array->map()->instance_type()) {
case EXTERNAL_BYTE_ARRAY_TYPE:
diff --git a/src/objects.cc b/src/objects.cc
index 63b77b7..8288f63 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -678,7 +678,7 @@
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
- // Externalizing twice leaks the external resouce, so it's
+ // Externalizing twice leaks the external resource, so it's
// prohibited by the API.
ASSERT(!this->IsExternalString());
#ifdef DEBUG
@@ -1276,7 +1276,7 @@
}
if (map()->unused_property_fields() == 0) {
- if (properties()->length() > kMaxFastProperties) {
+ if (properties()->length() > MaxFastProperties()) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return AddSlowProperty(name, value, attributes);
@@ -1386,6 +1386,11 @@
Object* value,
PropertyAttributes attributes) {
ASSERT(!IsJSGlobalProxy());
+ if (!map()->is_extensible()) {
+ Handle<Object> args[1] = {Handle<String>(name)};
+ return Top::Throw(*Factory::NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
+ }
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
@@ -1474,7 +1479,7 @@
Object* new_value,
PropertyAttributes attributes) {
if (map()->unused_property_fields() == 0 &&
- properties()->length() > kMaxFastProperties) {
+ properties()->length() > MaxFastProperties()) {
Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
if (obj->IsFailure()) return obj;
return ReplaceSlowProperty(name, new_value, attributes);
@@ -1746,8 +1751,6 @@
result->DictionaryResult(this, entry);
return;
}
- // Slow case object skipped during lookup. Do not use inline caching.
- if (!IsGlobalObject()) result->DisallowCaching();
}
result->NotFound();
}
@@ -2222,6 +2225,11 @@
Object* JSObject::NormalizeElements() {
ASSERT(!HasPixelElements() && !HasExternalArrayElements());
if (HasDictionaryElements()) return this;
+ ASSERT(map()->has_fast_elements());
+
+ Object* obj = map()->GetSlowElementsMap();
+ if (obj->IsFailure()) return obj;
+ Map* new_map = Map::cast(obj);
// Get number of entries.
FixedArray* array = FixedArray::cast(elements());
@@ -2230,7 +2238,7 @@
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
array->length();
- Object* obj = NumberDictionary::Allocate(length);
+ obj = NumberDictionary::Allocate(length);
if (obj->IsFailure()) return obj;
NumberDictionary* dictionary = NumberDictionary::cast(obj);
// Copy entries.
@@ -2243,7 +2251,10 @@
dictionary = NumberDictionary::cast(result);
}
}
- // Switch to using the dictionary as the backing storage for elements.
+ // Switch to using the dictionary as the backing storage for
+ // elements. Set the new map first to satisfy the elements type
+ // assert in set_elements().
+ set_map(new_map);
set_elements(dictionary);
Counters::elements_to_dictionary.Increment();
@@ -2568,6 +2579,25 @@
}
+Object* JSObject::PreventExtensions() {
+ // If there are fast elements we normalize.
+ if (HasFastElements()) {
+ NormalizeElements();
+ }
+ // Make sure that we never go back to fast case.
+ element_dictionary()->set_requires_slow_elements();
+
+ // Do a map transition; other objects with this map may still
+ // be extensible.
+ Object* new_map = map()->CopyDropTransitions();
+ if (new_map->IsFailure()) return new_map;
+ Map::cast(new_map)->set_is_extensible(false);
+ set_map(Map::cast(new_map));
+ ASSERT(!map()->is_extensible());
+ return new_map;
+}
+
+
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that it has
// no interceptors and needs no access checks).
@@ -3068,7 +3098,7 @@
Object* descriptors = instance_descriptors()->RemoveTransitions();
if (descriptors->IsFailure()) return descriptors;
cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
- return cast(new_map);
+ return new_map;
}
@@ -5473,14 +5503,18 @@
#endif // ENABLE_DISASSEMBLER
-void JSObject::SetFastElements(FixedArray* elems) {
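+// Allocate a fast-elements backing store with room for |capacity| elements,
+// copy the existing elements into it, install a fast-elements map, and, for
+// JSArrays, set the length to |length|.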
+Object* JSObject::SetFastElementsCapacityAndLength(int capacity, int length) {
// We should never end in here with a pixel or external array.
ASSERT(!HasPixelElements() && !HasExternalArrayElements());
-#ifdef DEBUG
- // Check the provided array is filled with the_hole.
- uint32_t len = static_cast<uint32_t>(elems->length());
- for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
-#endif
+
+ Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+ if (obj->IsFailure()) return obj;
+ FixedArray* elems = FixedArray::cast(obj);
+
+ obj = map()->GetFastElementsMap();
+ if (obj->IsFailure()) return obj;
+ Map* new_map = Map::cast(obj);
+
AssertNoAllocation no_gc;
WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) {
@@ -5508,7 +5542,15 @@
UNREACHABLE();
break;
}
+
+ set_map(new_map);
set_elements(elems);
+
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::FromInt(length));
+ }
+
+ return this;
}
@@ -5595,7 +5637,7 @@
Object* smi_length = len->ToSmi();
if (smi_length->IsSmi()) {
- int value = Smi::cast(smi_length)->value();
+ const int value = Smi::cast(smi_length)->value();
if (value < 0) return ArrayLengthRangeError();
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
@@ -5617,12 +5659,8 @@
int new_capacity = value > min ? value : min;
if (new_capacity <= kMaxFastElementsLength ||
!ShouldConvertToSlowElements(new_capacity)) {
- Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+ Object* obj = SetFastElementsCapacityAndLength(new_capacity, value);
if (obj->IsFailure()) return obj;
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- SetFastElements(FixedArray::cast(obj));
return this;
}
break;
@@ -5633,7 +5671,8 @@
// If the length of a slow array is reset to zero, we clear
// the array and flush backing storage. This has the added
// benefit that the array returns to fast mode.
- initialize_elements();
+ Object* obj = ResetElements();
+ if (obj->IsFailure()) return obj;
} else {
// Remove deleted elements.
uint32_t old_length =
@@ -6092,12 +6131,8 @@
if (new_capacity <= kMaxFastElementsLength ||
!ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
- Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+ Object* obj = SetFastElementsCapacityAndLength(new_capacity, index + 1);
if (obj->IsFailure()) return obj;
- SetFastElements(FixedArray::cast(obj));
- if (IsJSArray()) {
- JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
- }
FixedArray::cast(elements())->set(index, value);
return value;
}
@@ -6196,6 +6231,15 @@
return value;
}
}
+ // When we set the is_extensible flag to false we always force
+ // the elements into dictionary mode (and force them to stay there).
+ if (!map()->is_extensible()) {
+ Handle<Object> number(Heap::NumberFromUint32(index));
+ Handle<String> index_string(Factory::NumberToString(number));
+ Handle<Object> args[1] = { index_string };
+ return Top::Throw(*Factory::NewTypeError("object_not_extensible",
+ HandleVector(args, 1)));
+ }
Object* result = dictionary->AtNumberPut(index, value);
if (result->IsFailure()) return result;
if (elms != FixedArray::cast(result)) {
@@ -6216,13 +6260,11 @@
uint32_t new_length = 0;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
- JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
}
- Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
+ Object* obj = SetFastElementsCapacityAndLength(new_length, new_length);
if (obj->IsFailure()) return obj;
- SetFastElements(FixedArray::cast(obj));
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object elements are fast case again:\n");
@@ -7526,14 +7568,18 @@
}
// Convert to fast elements.
+ Object* obj = map()->GetFastElementsMap();
+ if (obj->IsFailure()) return obj;
+ Map* new_map = Map::cast(obj);
+
PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED: TENURED;
Object* new_array =
Heap::AllocateFixedArray(dict->NumberOfElements(), tenure);
- if (new_array->IsFailure()) {
- return new_array;
- }
+ if (new_array->IsFailure()) return new_array;
FixedArray* fast_elements = FixedArray::cast(new_array);
dict->CopyValuesTo(fast_elements);
+
+ set_map(new_map);
set_elements(fast_elements);
}
ASSERT(HasFastElements());
diff --git a/src/objects.h b/src/objects.h
index 0c14665..15cfd5c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1191,6 +1191,7 @@
// case, and a PixelArray or ExternalArray in special cases.
DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
+ inline Object* ResetElements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
inline bool HasDictionaryElements();
@@ -1366,8 +1367,9 @@
// Returns the index'th element.
// The undefined object if index is out of bounds.
Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
+ Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
- void SetFastElements(FixedArray* elements);
+ Object* SetFastElementsCapacityAndLength(int capacity, int length);
Object* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
@@ -1515,6 +1517,10 @@
// Casting.
static inline JSObject* cast(Object* obj);
+ // Disallow further properties from being added to the object.
+ Object* PreventExtensions();
+
+
// Dispatched behavior.
void JSObjectIterateBody(int object_size, ObjectVisitor* v);
void JSObjectShortPrint(StringStream* accumulator);
@@ -1546,6 +1552,11 @@
#endif
Object* SlowReverseLookup(Object* value);
+ // Maximal number of fast properties for the JSObject. Used to
+ // restrict the number of map transitions to avoid an explosion in
+ // the number of maps for objects used as dictionaries.
+ inline int MaxFastProperties();
+
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
static const uint32_t kMaxElementCount = 0xffffffffu;
@@ -1567,8 +1578,6 @@
STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
- Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
-
private:
Object* GetElementWithCallback(Object* receiver,
Object* structure,
@@ -2764,11 +2773,13 @@
InLoopFlag in_loop = NOT_IN_LOOP,
InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
- int argc = -1);
+ int argc = -1,
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
PropertyType type,
+ InlineCacheHolderFlag holder = OWN_MAP,
InLoopFlag in_loop = NOT_IN_LOOP,
int argc = -1);
@@ -2777,6 +2788,7 @@
static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
+ static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline Flags RemoveTypeFromFlags(Flags flags);
// Convert a target address into a code object.
@@ -2863,16 +2875,18 @@
static const int kFlagsICInLoopShift = 3;
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
- static const int kFlagsArgumentsCountShift = 11;
+ static const int kFlagsICHolderShift = 11;
+ static const int kFlagsArgumentsCountShift = 12;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
- static const int kFlagsArgumentsCountMask = 0xFFFFF800;
+ static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
+ static const int kFlagsArgumentsCountMask = 0xFFFFF000;
static const int kFlagsNotUsedInLookup =
- (kFlagsICInLoopMask | kFlagsTypeMask);
+ (kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -2979,12 +2993,20 @@
return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
}
- inline void set_is_extensible() {
- set_bit_field2(bit_field2() | (1 << kIsExtensible));
+ inline void set_is_extensible(bool value);
+ inline bool is_extensible();
+
+ // Tells whether the instance has fast elements.
+ void set_has_fast_elements(bool value) {
+ if (value) {
+ set_bit_field2(bit_field2() | (1 << kHasFastElements));
+ } else {
+ set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
+ }
}
- inline bool is_extensible() {
- return ((1 << kIsExtensible) & bit_field2()) != 0;
+ bool has_fast_elements() {
+ return ((1 << kHasFastElements) & bit_field2()) != 0;
}
// Tells whether the instance needs security checks when accessing its
@@ -3010,6 +3032,16 @@
// instance descriptors.
Object* CopyDropTransitions();
+ // Returns this map if it has the fast elements bit set, otherwise
+ // returns a copy of the map, with all transitions dropped from the
+ // descriptors and the fast elements bit set.
+ inline Object* GetFastElementsMap();
+
+ // Returns this map if it has the fast elements bit cleared,
+ // otherwise returns a copy of the map, with all transitions dropped
+ // from the descriptors and the fast elements bit cleared.
+ inline Object* GetSlowElementsMap();
+
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@@ -3111,6 +3143,7 @@
// Bit positions for bit field 2
static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1;
+ static const int kHasFastElements = 2;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
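Editorial note: the objects.h changes make the map itself record whether the instance has fast elements (the new kHasFastElements bit plus GetFastElementsMap/GetSlowElementsMap). A hedged JavaScript illustration of the kind of transition the bit tracks; the exact thresholds for switching representations are assumptions:

  var a = [];
  for (var i = 0; i < 100; i++) a[i] = i;  // dense writes keep a fast FixedArray backing store
  a[1 << 24] = 1;                          // a very sparse write may switch to dictionary elements,
                                           // which now also swaps in the slow-elements map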
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index fc65947..09797ca 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -120,8 +120,6 @@
int start_offset = previous_index;
int end_offset = subject_ptr->length();
- bool is_ascii = subject->IsAsciiRepresentation();
-
// The string has been flattened, so if it is a cons string it contains the
// full string in the first part.
if (StringShape(subject_ptr).IsCons()) {
@@ -129,7 +127,7 @@
subject_ptr = ConsString::cast(subject_ptr)->first();
}
// Ensure that an underlying string has the same ascii-ness.
- ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
+ bool is_ascii = subject_ptr->IsAsciiRepresentation();
ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
int char_size_shift = is_ascii ? 0 : 1;
diff --git a/src/regexp.js b/src/regexp.js
index 9367f15..58e620d 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -230,7 +230,10 @@
var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
- if (this.global) this.lastIndex = 0;
+ if (this.global) {
+ this.lastIndex = 0;
+ if (lastIndex != 0) return matchIndices;
+ }
cache.lastIndex = lastIndex;
cache.regExp = this;
cache.subject = s;
@@ -293,14 +296,14 @@
return cache.answer;
}
- // Remove irrelevant preceeding '.*' in a test regexp. The expression
- // checks whether this.source starts with '.*' and that the third
+ // Remove irrelevant preceding '.*' in a test regexp. The expression
+ // checks whether this.source starts with '.*' and that the third
// char is not a '?'
if (%_StringCharCodeAt(this.source,0) == 46 && // '.'
- %_StringCharCodeAt(this.source,1) == 42 && // '*'
- %_StringCharCodeAt(this.source,2) != 63) { // '?'
+ %_StringCharCodeAt(this.source,1) == 42 && // '*'
+ %_StringCharCodeAt(this.source,2) != 63) { // '?'
if (!%_ObjectEquals(regexp_key, this)) {
- regexp_key = this;
+ regexp_key = this;
regexp_val = new $RegExp(this.source.substring(2, this.source.length),
(this.global ? 'g' : '')
+ (this.ignoreCase ? 'i' : '')
@@ -308,7 +311,7 @@
}
if (!regexp_val.test(s)) return false;
}
-
+
var length = s.length;
var i = this.global ? TO_INTEGER(lastIndex) : 0;
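Editorial note: the regexp.js change makes a failing exec on a global regexp with a non-zero lastIndex return immediately instead of falling through to the result cache. A small sketch of the observable behavior this seems aimed at; the caching itself is internal and assumed:

  var re = /foo/g;
  re.exec("foo");   // matches at index 0, lastIndex becomes 3
  re.exec("foo");   // no match from index 3: returns null and resets lastIndex to 0
  re.exec("foo");   // matches at index 0 again - the failed attempt above was not cached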
diff --git a/src/rewriter.cc b/src/rewriter.cc
index c97408e..73301b9 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -87,11 +87,13 @@
void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
+ node->expression()->set_no_negative_zero(true);
Visit(node->expression());
}
void AstOptimizer::VisitIfStatement(IfStatement* node) {
+ node->condition()->set_no_negative_zero(true);
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
@@ -101,6 +103,7 @@
void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
+ node->cond()->set_no_negative_zero(true);
Visit(node->cond());
Visit(node->body());
}
@@ -108,6 +111,7 @@
void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
has_function_literal_ = false;
+ node->cond()->set_no_negative_zero(true);
Visit(node->cond());
node->may_have_function_literal_ = has_function_literal_;
Visit(node->body());
@@ -120,6 +124,7 @@
}
if (node->cond() != NULL) {
has_function_literal_ = false;
+ node->cond()->set_no_negative_zero(true);
Visit(node->cond());
node->may_have_function_literal_ = has_function_literal_;
}
@@ -151,6 +156,7 @@
void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
+ node->tag()->set_no_negative_zero(true);
Visit(node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
CaseClause* clause = node->cases()->at(i);
@@ -444,6 +450,7 @@
if (FLAG_safe_int32_compiler) {
switch (node->op()) {
case Token::BIT_NOT:
+ node->expression()->set_no_negative_zero(true);
node->expression()->set_to_int32(true);
// Fall through.
case Token::ADD:
@@ -476,10 +483,49 @@
}
+static bool CouldBeNegativeZero(AstNode* node) {
+ Literal* literal = node->AsLiteral();
+ if (literal != NULL) {
+ Handle<Object> handle = literal->handle();
+ if (handle->IsString() || handle->IsSmi()) {
+ return false;
+ } else if (handle->IsHeapNumber()) {
+ double double_value = HeapNumber::cast(*handle)->value();
+ if (double_value != 0) {
+ return false;
+ }
+ }
+ }
+ BinaryOperation* binary = node->AsBinaryOperation();
+ if (binary != NULL && Token::IsBitOp(binary->op())) {
+ return false;
+ }
+ return true;
+}
+
+
+static bool CouldBePositiveZero(AstNode* node) {
+ Literal* literal = node->AsLiteral();
+ if (literal != NULL) {
+ Handle<Object> handle = literal->handle();
+ if (handle->IsSmi()) {
+ if (Smi::cast(*handle) != Smi::FromInt(0)) {
+ return false;
+ }
+ } else if (handle->IsHeapNumber()) {
+ // Heap number literal can't be +0, because that's a Smi.
+ return false;
+ }
+ }
+ return true;
+}
+
+
void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
// Depending on the operation we can propagate this node's type down the
// AST nodes.
- switch (node->op()) {
+ Token::Value op = node->op();
+ switch (op) {
case Token::COMMA:
case Token::OR:
node->left()->set_no_negative_zero(true);
@@ -503,23 +549,54 @@
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(true);
break;
+ case Token::MUL: {
+ VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
+ VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
+ if (lvar_proxy != NULL && rvar_proxy != NULL) {
+ Variable* lvar = lvar_proxy->AsVariable();
+ Variable* rvar = rvar_proxy->AsVariable();
+ if (lvar != NULL && rvar != NULL) {
+ if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
+ Slot* lslot = lvar->slot();
+ Slot* rslot = rvar->slot();
+ if (lslot->type() == rslot->type() &&
+ (lslot->type() == Slot::PARAMETER ||
+ lslot->type() == Slot::LOCAL) &&
+ lslot->index() == rslot->index()) {
+ // A number squared doesn't give negative zero.
+ node->set_no_negative_zero(true);
+ }
+ }
+ }
+ }
+ }
case Token::ADD:
case Token::SUB:
- case Token::MUL:
case Token::DIV:
- case Token::MOD:
+ case Token::MOD: {
if (node->type()->IsLikelySmi()) {
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
- node->left()->set_no_negative_zero(node->no_negative_zero());
- node->right()->set_no_negative_zero(node->no_negative_zero());
+ if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
+ !CouldBeNegativeZero(node->right()))) {
+ node->left()->set_no_negative_zero(true);
+ node->right()->set_no_negative_zero(true);
+ } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
+ !CouldBePositiveZero(node->right()))) {
+ node->left()->set_no_negative_zero(true);
+ node->right()->set_no_negative_zero(true);
+ } else {
+ node->left()->set_no_negative_zero(node->no_negative_zero());
+ node->right()->set_no_negative_zero(node->no_negative_zero());
+ }
if (node->op() == Token::DIV) {
node->right()->set_no_negative_zero(false);
} else if (node->op() == Token::MOD) {
node->right()->set_no_negative_zero(true);
}
break;
+ }
default:
UNREACHABLE();
break;
@@ -530,7 +607,7 @@
// After visiting the operand nodes we have to check if this node's type
// can be updated. If it does, then we can push that information down
- // towards the leafs again if the new information is an upgrade over the
+ // towards the leaves again if the new information is an upgrade over the
// previous type of the operand nodes.
if (node->type()->IsUnknown()) {
if (node->left()->type()->IsLikelySmi() ||
@@ -590,7 +667,7 @@
void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
if (node->type()->IsKnown()) {
- // Propagate useful information down towards the leafs.
+ // Propagate useful information down towards the leaves.
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
@@ -604,7 +681,7 @@
// After visiting the operand nodes we have to check if this node's type
// can be updated. If it does, then we can push that information down
- // towards the leafs again if the new information is an upgrade over the
+ // towards the leaves again if the new information is an upgrade over the
// previous type of the operand nodes.
if (node->type()->IsUnknown()) {
if (node->left()->type()->IsLikelySmi() ||
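Editorial note: the rewriter.cc hunks let the analysis prove more expressions cannot produce -0: a variable multiplied by itself, an addition where one operand cannot be -0, and a subtraction where the left operand cannot be -0 or the right cannot be +0. The underlying IEEE 754 facts, illustrated in JavaScript (dividing by the result distinguishes -0 from +0):

  1 / ((-0) * (-0));   // Infinity: x * x is never -0, even when x itself is -0
  1 / (0 + (-0));      // Infinity: a + b is -0 only when both operands are -0
  1 / ((-0) + (-0));   // -Infinity: the one ADD case that does yield -0
  1 / ((-0) - 0);      // -Infinity: a - b is -0 only when a is -0 and b is +0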
diff --git a/src/runtime.cc b/src/runtime.cc
index 71148e6..a3eb09f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -678,6 +678,12 @@
}
+static Object* Runtime_PreventExtensions(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ return obj->PreventExtensions();
+}
+
static Object* Runtime_IsExtensible(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSObject, obj, args[0]);
@@ -2782,13 +2788,17 @@
// algorithm is unnecessary overhead.
if (pattern_length == 1) {
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
- if (sub->IsAsciiRepresentation()) {
+ String* seq_sub = *sub;
+ if (seq_sub->IsConsString()) {
+ seq_sub = ConsString::cast(seq_sub)->first();
+ }
+ if (seq_sub->IsAsciiRepresentation()) {
uc16 pchar = pat->Get(0);
if (pchar > String::kMaxAsciiCharCode) {
return -1;
}
Vector<const char> ascii_vector =
- sub->ToAsciiVector().SubVector(start_index, subject_length);
+ seq_sub->ToAsciiVector().SubVector(start_index, subject_length);
const void* pos = memchr(ascii_vector.start(),
static_cast<const char>(pchar),
static_cast<size_t>(ascii_vector.length()));
@@ -2798,7 +2808,9 @@
return static_cast<int>(reinterpret_cast<const char*>(pos)
- ascii_vector.start() + start_index);
}
- return SingleCharIndexOf(sub->ToUC16Vector(), pat->Get(0), start_index);
+ return SingleCharIndexOf(seq_sub->ToUC16Vector(),
+ pat->Get(0),
+ start_index);
}
if (!pat->IsFlat()) {
@@ -2806,19 +2818,29 @@
}
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ // Extract flattened substrings of cons strings before determining asciiness.
+ String* seq_sub = *sub;
+ if (seq_sub->IsConsString()) {
+ seq_sub = ConsString::cast(seq_sub)->first();
+ }
+ String* seq_pat = *pat;
+ if (seq_pat->IsConsString()) {
+ seq_pat = ConsString::cast(seq_pat)->first();
+ }
+
// dispatch on type of strings
- if (pat->IsAsciiRepresentation()) {
- Vector<const char> pat_vector = pat->ToAsciiVector();
- if (sub->IsAsciiRepresentation()) {
- return StringSearch(sub->ToAsciiVector(), pat_vector, start_index);
+ if (seq_pat->IsAsciiRepresentation()) {
+ Vector<const char> pat_vector = seq_pat->ToAsciiVector();
+ if (seq_sub->IsAsciiRepresentation()) {
+ return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
}
- return StringSearch(sub->ToUC16Vector(), pat_vector, start_index);
+ return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
}
- Vector<const uc16> pat_vector = pat->ToUC16Vector();
- if (sub->IsAsciiRepresentation()) {
- return StringSearch(sub->ToAsciiVector(), pat_vector, start_index);
+ Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
+ if (seq_sub->IsAsciiRepresentation()) {
+ return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
}
- return StringSearch(sub->ToUC16Vector(), pat_vector, start_index);
+ return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
}
@@ -5346,9 +5368,6 @@
}
-
-
-
static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -7449,7 +7468,7 @@
uint32_t index_limit_;
// Index after last seen index. Always less than or equal to index_limit_.
uint32_t index_offset_;
- bool fast_elements_;
+ const bool fast_elements_;
};
@@ -7766,13 +7785,14 @@
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
storage = Factory::NewFixedArrayWithHoles(result_length);
-
+ result->set_map(*Factory::GetFastElementsMap(Handle<Map>(result->map())));
} else {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
Factory::NewNumberDictionary(at_least_space_for));
+ result->set_map(*Factory::GetSlowElementsMap(Handle<Map>(result->map())));
}
Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@@ -7822,9 +7842,19 @@
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, from, args[0]);
CONVERT_CHECKED(JSArray, to, args[1]);
- to->SetContent(FixedArray::cast(from->elements()));
+ HeapObject* new_elements = from->elements();
+ Object* new_map;
+ if (new_elements->map() == Heap::fixed_array_map()) {
+ new_map = to->map()->GetFastElementsMap();
+ } else {
+ new_map = to->map()->GetSlowElementsMap();
+ }
+ if (new_map->IsFailure()) return new_map;
+ to->set_map(Map::cast(new_map));
+ to->set_elements(new_elements);
to->set_length(from->length());
- from->SetContent(Heap::empty_fixed_array());
+ Object* obj = from->ResetElements();
+ if (obj->IsFailure()) return obj;
from->set_length(Smi::FromInt(0));
return to;
}
@@ -9052,7 +9082,7 @@
// Set break point.
Debug::SetBreakPoint(shared, break_point_object_arg, &source_position);
- return Heap::undefined_value();
+ return Smi::FromInt(source_position);
}
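Editorial note: the StringIndexOf changes above read through one level of ConsString (its first part, which holds the full flattened contents) before taking ASCII or two-byte vectors. Whether a given concatenation is actually represented as a cons string is an internal heuristic, so the sketch below only hints at the case being handled:

  var prefix = new Array(1000).join("ab");  // a longer flat string
  var s = prefix + "needle";                // concatenation typically produces a cons string
  s.indexOf("needle");                      // the search must use the flattened first part,
  s.indexOf("n");                           // for both multi-char and single-char patterns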
diff --git a/src/runtime.h b/src/runtime.h
index 3d4df1b..5719fc8 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -72,6 +72,7 @@
F(GetOwnProperty, 2, 1) \
\
F(IsExtensible, 1, 1) \
+ F(PreventExtensions, 1, 1)\
\
/* Utilities */ \
F(GetFunctionDelegate, 1, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index 3e4d473..ab6e3e9 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -112,7 +112,7 @@
// the result when either (or both) the operands are NaN.
function COMPARE(x, ncr) {
var left;
-
+ var right;
// Fast cases for string, numbers and undefined compares.
if (IS_STRING(this)) {
if (IS_STRING(x)) return %_StringCompare(this, x);
@@ -123,14 +123,18 @@
if (IS_UNDEFINED(x)) return ncr;
left = this;
} else if (IS_UNDEFINED(this)) {
+ if (!IS_UNDEFINED(x)) {
+ %ToPrimitive(x, NUMBER_HINT);
+ }
+ return ncr;
+ } else if (IS_UNDEFINED(x)) {
+ %ToPrimitive(this, NUMBER_HINT);
return ncr;
} else {
- if (IS_UNDEFINED(x)) return ncr;
left = %ToPrimitive(this, NUMBER_HINT);
}
- // Default implementation.
- var right = %ToPrimitive(x, NUMBER_HINT);
+ right = %ToPrimitive(x, NUMBER_HINT);
if (IS_STRING(left) && IS_STRING(right)) {
return %_StringCompare(left, right);
} else {
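Editorial note: the COMPARE change in runtime.js keeps the ToPrimitive step when one side of a relational comparison is undefined, so user-visible valueOf/toString side effects still happen even though the result is unchanged. A hedged sketch of the observable difference:

  var calls = 0;
  var obj = { valueOf: function () { calls++; return 1; } };
  undefined < obj;   // still false (the comparison result is the NaN case),
  calls;             // but valueOf has now been invoked once, matching the spec's ToPrimitive step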
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index ffa92dd..a654a08 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -94,6 +94,7 @@
Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
+ ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
// If no global objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map
// and we use the empty string for the map cache in that case. If
@@ -129,14 +130,16 @@
JSObject* receiver,
JSObject* holder,
int field_index) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -148,14 +151,16 @@
JSObject* holder,
AccessorInfo* callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -166,15 +171,17 @@
JSObject* receiver,
JSObject* holder,
Object* value) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -184,21 +191,23 @@
Object* StubCache::ComputeLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
}
-Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
+Object* StubCache::ComputeLoadNormal() {
return Builtins::builtin(Builtins::LoadIC_Normal);
}
@@ -208,8 +217,10 @@
GlobalObject* holder,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadGlobal(receiver,
@@ -219,7 +230,7 @@
is_dont_delete);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -230,14 +241,16 @@
JSObject* receiver,
JSObject* holder,
int field_index) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -248,15 +261,17 @@
JSObject* receiver,
JSObject* holder,
Object* value) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -266,15 +281,17 @@
Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
JSObject* receiver,
JSObject* holder) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -285,15 +302,17 @@
JSObject* receiver,
JSObject* holder,
AccessorInfo* callback) {
+ ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+ Map* map = receiver->map();
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -305,13 +324,15 @@
JSArray* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ ASSERT(receiver->IsJSObject());
+ Map* map = receiver->map();
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -322,13 +343,14 @@
String* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Map* map = receiver->map();
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadStringLength(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -339,13 +361,14 @@
JSFunction* receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Map* map = receiver->map();
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code;
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -371,6 +394,11 @@
}
+Object* StubCache::ComputeStoreNormal() {
+ return Builtins::builtin(Builtins::StoreIC_Normal);
+}
+
+
Object* StubCache::ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell) {
@@ -380,7 +408,7 @@
StoreStubCompiler compiler;
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
- PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+ PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
@@ -451,7 +479,9 @@
JSObject* holder,
JSFunction* function) {
// Compute the check type and the map.
- Map* map = IC::GetCodeCacheMapForObject(object);
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(object, holder);
+ Map* map = IC::GetCodeCacheMap(object, cache_holder);
// Compute check type based on receiver/holder.
StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
@@ -466,6 +496,7 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
+ cache_holder,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
@@ -476,7 +507,7 @@
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
- CallStubCompiler compiler(argc, in_loop, kind);
+ CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
@@ -497,7 +528,9 @@
JSObject* holder,
int index) {
// Compute the check type and the map.
- Map* map = IC::GetCodeCacheMapForObject(object);
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(object, holder);
+ Map* map = IC::GetCodeCacheMap(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -508,11 +541,12 @@
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
+ cache_holder,
in_loop,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, in_loop, kind);
+ CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallField(JSObject::cast(object),
holder,
index,
@@ -534,8 +568,9 @@
Object* object,
JSObject* holder) {
// Compute the check type and the map.
- // If the object is a value, we use the prototype map for the cache.
- Map* map = IC::GetCodeCacheMapForObject(object);
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(object, holder);
+ Map* map = IC::GetCodeCacheMap(object, cache_holder);
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
@@ -547,11 +582,12 @@
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
+ cache_holder,
NOT_IN_LOOP,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
- CallStubCompiler compiler(argc, NOT_IN_LOOP, kind);
+ CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
code = compiler.CompileCallInterceptor(JSObject::cast(object),
holder,
name);
@@ -585,25 +621,29 @@
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function) {
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(receiver, holder);
+ Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
NORMAL,
+ cache_holder,
in_loop,
argc);
- Object* code = receiver->map()->FindInCodeCache(name, flags);
+ Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
- CallStubCompiler compiler(argc, in_loop, kind);
+ CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
- Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+ Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
}
return code;
@@ -1203,6 +1243,17 @@
}
+CallStubCompiler::CallStubCompiler(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ InlineCacheHolderFlag cache_holder)
+ : arguments_(argc)
+ , in_loop_(in_loop)
+ , kind_(kind)
+ , cache_holder_(cache_holder) {
+}
+
+
Object* CallStubCompiler::CompileCustomCall(int generator_id,
Object* object,
JSObject* holder,
@@ -1230,6 +1281,7 @@
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
+ cache_holder_,
in_loop_,
argc);
return GetCodeWithFlags(flags, name);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index fcfffcf..856904a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -77,7 +77,7 @@
JSObject* receiver,
JSObject* holder);
- static Object* ComputeLoadNormal(String* name, JSObject* receiver);
+ static Object* ComputeLoadNormal();
static Object* ComputeLoadGlobal(String* name,
@@ -121,6 +121,8 @@
int field_index,
Map* transition = NULL);
+ static Object* ComputeStoreNormal();
+
static Object* ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell);
@@ -407,8 +409,21 @@
static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
- // Check the integrity of the prototype chain to make sure that the
- // current IC is still valid.
+ // Generates code that verifies that the property holder has not changed
+ // (checking maps of objects in the prototype chain for fast and global
+ // objects, or doing a negative lookup for slow objects, and ensuring that
+ // the property cells for global objects are still empty), and checks that
+ // the map of the holder has not changed. If necessary, the function also
+ // generates code for a security check in the case of global object holders.
+ // Helps to make sure that the current IC is still valid.
+ //
+ // The scratch and holder registers are always clobbered, but the object
+ // register is only clobbered if it is the same as the holder register. The
+ // function returns a register containing the holder - either object_reg or
+ // holder_reg.
+ // The function can optionally (when save_at_depth !=
+ // kInvalidProtoDepth) save the object at the given depth by moving
+ // it to [esp + kPointerSize].
Register CheckPrototypes(JSObject* object,
Register object_reg,
@@ -416,9 +431,10 @@
Register holder_reg,
Register scratch,
String* name,
- Label* miss) {
+ Label* miss,
+ Register extra = no_reg) {
return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
- name, kInvalidProtoDepth, miss);
+ name, kInvalidProtoDepth, miss, extra);
}
Register CheckPrototypes(JSObject* object,
@@ -428,7 +444,8 @@
Register scratch,
String* name,
int save_at_depth,
- Label* miss);
+ Label* miss,
+ Register extra = no_reg);
protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
@@ -611,8 +628,10 @@
kNumCallGenerators
};
- CallStubCompiler(int argc, InLoopFlag in_loop, Code::Kind kind)
- : arguments_(argc), in_loop_(in_loop), kind_(kind) { }
+ CallStubCompiler(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind,
+ InlineCacheHolderFlag cache_holder);
Object* CompileCallField(JSObject* object,
JSObject* holder,
@@ -653,6 +672,7 @@
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
+ const InlineCacheHolderFlag cache_holder_;
const ParameterCount& arguments() { return arguments_; }
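Editorial note: the stub-cache changes thread an InlineCacheHolderFlag through flag computation and the call stub compilers so a monomorphic stub can be cached on the holder's map rather than only on the receiver's own map. A rough JavaScript illustration of the case this is useful for; the specific caching decision is internal and assumed:

  // A primitive receiver has no per-value map to hang a stub on, so the IC can
  // instead cache the stub on the prototype's map and stay monomorphic here.
  function firstChar(s) { return s.charAt(0); }
  firstChar("abc");
  firstChar("xyz");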
diff --git a/src/type-info.h b/src/type-info.h
index 9d20bc1..91ecab8 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -79,7 +79,7 @@
// Decode compact representation. Very sensitive to enum values below!
static TypeInfo ExpandedRepresentation(int three_bit_representation) {
- Type t = static_cast<Type>(three_bit_representation >= 6 ?
+ Type t = static_cast<Type>(three_bit_representation > 4 ?
three_bit_representation + 2 :
three_bit_representation);
t = (t == kUnknownType) ? t : static_cast<Type>(t | kPrimitiveType);
diff --git a/src/utils.h b/src/utils.h
index d7c5b70..236b85e 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -587,7 +587,7 @@
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
// TODO(lrn): Try to find a more precise value.
-static const int kMinComplexMemCopy = 256;
+static const int kMinComplexMemCopy = 64;
#else // V8_TARGET_ARCH_IA32
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 10b8102..509de3d 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -153,6 +153,12 @@
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
+ SC(store_normal_miss, V8.StoreNormalMiss) \
+ SC(store_normal_hit, V8.StoreNormalHit) \
+ SC(call_miss, V8.CallMiss) \
+ SC(keyed_call_miss, V8.KeyedCallMiss) \
+ SC(load_miss, V8.LoadMiss) \
+ SC(keyed_load_miss, V8.KeyedLoadMiss) \
SC(call_const, V8.CallConst) \
SC(call_const_fast_api, V8.CallConstFastApi) \
SC(call_const_interceptor, V8.CallConstInterceptor) \
@@ -162,6 +168,8 @@
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
+ SC(negative_lookups, V8.NegativeLookups) \
+ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
diff --git a/src/v8dll-main.cc b/src/v8dll-main.cc
new file mode 100644
index 0000000..3d4b3a3
--- /dev/null
+++ b/src/v8dll-main.cc
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <windows.h>
+
+#include "../include/v8.h"
+
+extern "C" {
+BOOL WINAPI DllMain(HANDLE hinstDLL,
+ DWORD dwReason,
+ LPVOID lpvReserved) {
+ // Do nothing.
+ return TRUE;
+}
+}
diff --git a/src/v8natives.js b/src/v8natives.js
index 1d47eb7..487faab 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -677,9 +677,20 @@
}
}
- // Property names are expected to be strings.
- for (var i = 0; i < propertyNames.length; ++i)
- propertyNames[i] = ToString(propertyNames[i]);
+ // Property names are expected to be unique strings.
+ var propertySet = {};
+ var j = 0;
+ for (var i = 0; i < propertyNames.length; ++i) {
+ var name = ToString(propertyNames[i]);
+ // We need to check for the exact property value since for intrinsic
+ // properties like toString, if (propertySet["toString"]) will always
+ // succeed.
+ if (propertySet[name] === true)
+ continue;
+ propertySet[name] = true;
+ propertyNames[j++] = name;
+ }
+ propertyNames.length = j;
return propertyNames;
}
@@ -734,6 +745,27 @@
}
+// ES5 section 15.2.3.10
+function ObjectPreventExtension(obj) {
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
+ !IS_UNDETECTABLE(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+ }
+ %PreventExtensions(obj);
+ return obj;
+}
+
+
+// ES5 section 15.2.3.13
+function ObjectIsExtensible(obj) {
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
+ !IS_UNDETECTABLE(obj)) {
+ throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+ }
+ return %IsExtensible(obj);
+}
+
+
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -769,7 +801,9 @@
"defineProperties", ObjectDefineProperties,
"getPrototypeOf", ObjectGetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
- "getOwnPropertyNames", ObjectGetOwnPropertyNames
+ "getOwnPropertyNames", ObjectGetOwnPropertyNames,
+ "isExtensible", ObjectIsExtensible,
+ "preventExtensions", ObjectPreventExtension
));
}
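Editorial note: the v8natives.js additions expose Object.preventExtensions and Object.isExtensible (ES5 sections 15.2.3.10 and 15.2.3.13) and de-duplicate the names collected for getOwnPropertyNames. A minimal usage sketch; the exact failure mode for the rejected named-property write (silent vs. TypeError) is an assumption:

  var o = { a: 1 };
  Object.isExtensible(o);       // true
  Object.preventExtensions(o);  // returns o
  Object.isExtensible(o);       // false
  o.b = 2;                      // the new named property is not added
  o.a;                          // 1 - existing properties are untouched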
diff --git a/src/version.cc b/src/version.cc
index c9e8411..db604e0 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 19
+#define BUILD_NUMBER 22
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index e665385..c19e2ba 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -376,8 +376,13 @@
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
+ int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
+ while (delta >= 9) {
+ nop(9);
+ delta -= 9;
+ }
+ if (delta > 0) {
+ nop(delta);
}
}
@@ -837,9 +842,7 @@
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: FF /2 r64.
- if (adr.high_bit()) {
- emit_rex_64(adr);
- }
+ emit_optional_rex_32(adr);
emit(0xFF);
emit_modrm(0x2, adr);
}
@@ -849,9 +852,9 @@
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: FF /2 m64.
- emit_rex_64(op);
+ emit_optional_rex_32(op);
emit(0xFF);
- emit_operand(2, op);
+ emit_operand(0x2, op);
}
@@ -1270,9 +1273,7 @@
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode FF/4 r64.
- if (target.high_bit()) {
- emit_rex_64(target);
- }
+ emit_optional_rex_32(target);
emit(0xFF);
emit_modrm(0x4, target);
}
@@ -1562,7 +1563,7 @@
void Assembler::movsxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_32(dst, src);
+ emit_rex_64(dst, src);
emit(0x0F);
emit(0xBE);
emit_operand(dst, src);
@@ -1600,7 +1601,7 @@
void Assembler::movzxbq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst, src);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
emit_operand(dst, src);
@@ -1620,7 +1621,7 @@
void Assembler::movzxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst, src);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
emit_operand(dst, src);
@@ -1831,9 +1832,7 @@
void Assembler::pop(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (dst.high_bit()) {
- emit_rex_64(dst);
- }
+ emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
@@ -1841,7 +1840,7 @@
void Assembler::pop(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(dst); // Could be omitted in some cases.
+ emit_optional_rex_32(dst);
emit(0x8F);
emit_operand(0, dst);
}
@@ -1857,9 +1856,7 @@
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- if (src.high_bit()) {
- emit_rex_64(src);
- }
+ emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
@@ -1867,7 +1864,7 @@
void Assembler::push(const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_rex_64(src); // Could be omitted in some cases.
+ emit_optional_rex_32(src);
emit(0xFF);
emit_operand(6, src);
}
@@ -2609,6 +2606,28 @@
}
+void Assembler::movss(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3); // single
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movss(const Operand& src, XMMRegister dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3); // single
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x11); // store
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2664,6 +2683,17 @@
}
+void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2686,6 +2716,50 @@
}
+void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtsd2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2D);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2763,6 +2837,18 @@
}
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x2e);
+ emit_sse_operand(dst, src);
+}
+
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index f195439..213db2c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -46,23 +46,23 @@
// Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) {
- static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
- return x == (x & kUInt32Mask);
+ static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
+ return static_cast<uint64_t>(x) <= kMaxUInt32;
}
static inline bool is_int32(int64_t x) {
- static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
- return is_uint32(x - kMinIntValue);
+ static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
+ return is_uint32(x - kMinInt32);
}
static inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
- return x < kMaxIntValue;
+ static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
+ return x <= kMaxInt32;
}
static inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
- return x < kMaxUIntValue;
+ static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
+ return x <= kMaxUInt32;
}
// CPU Registers.
@@ -1110,6 +1110,9 @@
void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
+ void movss(XMMRegister dst, const Operand& src);
+ void movss(const Operand& dst, XMMRegister src);
+
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1119,7 +1122,14 @@
void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src);
+ void cvtlsi2ss(XMMRegister dst, Register src);
+
void cvtss2sd(XMMRegister dst, XMMRegister src);
+ void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtsd2ss(XMMRegister dst, XMMRegister src);
+
+ void cvtsd2si(Register dst, XMMRegister src);
+ void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
@@ -1130,6 +1140,7 @@
void sqrtsd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, const Operand& src);
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index ff655c7..a38ebaf 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1238,10 +1238,6 @@
__ movq(rbx, r8);
#endif // _WIN64
- // Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
-
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 3ba8906..3b1aeae 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -592,7 +592,6 @@
&& (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
&& (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
&& (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
&& (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif
@@ -856,7 +855,7 @@
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
- __ movq(rax, Immediate(scope()->num_parameters()));
+ __ Set(rax, scope()->num_parameters());
for (int i = 0; i < scope()->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
@@ -1600,11 +1599,133 @@
}
+void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
+ // A fast smi loop is a for loop with an initializer
+ // that is a simple assignment of a smi to a stack variable,
+ // a test that is a simple test of that variable against a smi constant,
+ // and a step that is an increment/decrement of the variable, and
+ // where the variable isn't modified in the loop body.
+ // This guarantees that the variable is always a smi.
+
+ Variable* loop_var = node->loop_variable();
+ Smi* initial_value = *Handle<Smi>::cast(node->init()
+ ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
+ Smi* limit_value = *Handle<Smi>::cast(
+ node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
+ Token::Value compare_op =
+ node->cond()->AsCompareOperation()->op();
+ bool increments =
+ node->next()->StatementAsCountOperation()->op() == Token::INC;
+
+ // Check that the condition isn't initially false.
+ bool initially_false = false;
+ int initial_int_value = initial_value->value();
+ int limit_int_value = limit_value->value();
+ switch (compare_op) {
+ case Token::LT:
+ initially_false = initial_int_value >= limit_int_value;
+ break;
+ case Token::LTE:
+ initially_false = initial_int_value > limit_int_value;
+ break;
+ case Token::GT:
+ initially_false = initial_int_value <= limit_int_value;
+ break;
+ case Token::GTE:
+ initially_false = initial_int_value < limit_int_value;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (initially_false) return;
+
+ // Only check loop condition at the end.
+
+ Visit(node->init());
+
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ // Set type and stack height of BreakTargets.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ IncrementLoopNesting();
+ loop.Bind();
+
+ // Set number type of the loop variable to smi.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+
+ SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
+ Visit(node->body());
+
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ if (has_valid_frame()) {
+ CodeForStatementPosition(node);
+ Slot* loop_var_slot = loop_var->slot();
+ if (loop_var_slot->type() == Slot::LOCAL) {
+ frame_->PushLocalAt(loop_var_slot->index());
+ } else {
+ ASSERT(loop_var_slot->type() == Slot::PARAMETER);
+ frame_->PushParameterAt(loop_var_slot->index());
+ }
+ Result loop_var_result = frame_->Pop();
+ if (!loop_var_result.is_register()) {
+ loop_var_result.ToRegister();
+ }
+
+ if (increments) {
+ __ SmiAddConstant(loop_var_result.reg(),
+ loop_var_result.reg(),
+ Smi::FromInt(1));
+ } else {
+ __ SmiSubConstant(loop_var_result.reg(),
+ loop_var_result.reg(),
+ Smi::FromInt(1));
+ }
+
+ {
+ __ SmiCompare(loop_var_result.reg(), limit_value);
+ Condition condition;
+ switch (compare_op) {
+ case Token::LT:
+ condition = less;
+ break;
+ case Token::LTE:
+ condition = less_equal;
+ break;
+ case Token::GT:
+ condition = greater;
+ break;
+ case Token::GTE:
+ condition = greater_equal;
+ break;
+ default:
+ condition = never;
+ UNREACHABLE();
+ }
+ loop.Branch(condition);
+ }
+ loop_var_result.Unuse();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ DecrementLoopNesting();
+}
+
+
void CodeGenerator::VisitForStatement(ForStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
+ if (node->is_fast_smi_loop()) {
+ GenerateFastSmiLoop(node);
+ return;
+ }
+
// Compile the init expression if present.
if (node->init() != NULL) {
Visit(node->init());
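Editorial note: GenerateFastSmiLoop in the hunk above special-cases loops whose index provably stays a smi, checking the condition only at the bottom of the loop. Roughly the shape it recognizes, per the comment at the top of the function; the analysis that sets is_fast_smi_loop lives elsewhere and is assumed here:

  // Counter initialized to a smi constant, compared against a smi constant,
  // incremented or decremented by the update expression, and never otherwise
  // written in the body, so the generated code can keep it as a smi throughout.
  function sum() {
    var total = 0;
    for (var i = 0; i < 10000; i++) {
      total += i;
    }
    return total;
  }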
@@ -1694,16 +1815,6 @@
CheckStack(); // TODO(1222600): ignore if body contains calls.
- // We know that the loop index is a smi if it is not modified in the
- // loop body and it is checked against a constant limit in the loop
- // condition. In this case, we reset the static type information of the
- // loop index to smi before compiling the body, the update expression, and
- // the bottom check of the loop condition.
- if (node->is_fast_smi_loop()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
- }
-
Visit(node->body());
// If there is an update expression, compile it if necessary.
@@ -1723,13 +1834,6 @@
}
}
- // Set the type of the loop variable to smi before compiling the test
- // expression if we are in a fast smi loop condition.
- if (node->is_fast_smi_loop() && has_valid_frame()) {
- // Set number type of the loop variable to smi.
- SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
- }
-
// Based on the condition analysis, compile the backward jump as
// necessary.
switch (info) {
@@ -2641,7 +2745,7 @@
// Generate code to set the elements in the array that are not
// literals.
- for (int i = 0; i < node->values()->length(); i++) {
+ for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i);
// If value is a literal the property value is already set in the
@@ -3501,17 +3605,16 @@
__ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
}
if (is_increment) {
- __ SmiAddConstant(kScratchRegister,
+ __ SmiAddConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
} else {
- __ SmiSubConstant(kScratchRegister,
+ __ SmiSubConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
}
- __ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
// Postfix count operations return their input converted to
@@ -3855,8 +3958,17 @@
default:
UNREACHABLE();
}
- Load(left);
- Load(right);
+
+ if (left->IsTrivial()) {
+ Load(right);
+ Result right_result = frame_->Pop();
+ frame_->Push(left);
+ frame_->Push(&right_result);
+ } else {
+ Load(left);
+ Load(right);
+ }
+
Comparison(node, cc, strict, destination());
}
@@ -5333,14 +5445,18 @@
}
// Smi => false iff zero.
__ SmiCompare(value.reg(), Smi::FromInt(0));
- dest->false_target()->Branch(equal);
- Condition is_smi = masm_->CheckSmi(value.reg());
- dest->true_target()->Branch(is_smi);
- __ fldz();
- __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
- __ FCmp();
- value.Unuse();
- dest->Split(not_zero);
+ if (value.is_smi()) {
+ value.Unuse();
+ dest->Split(not_zero);
+ } else {
+ dest->false_target()->Branch(equal);
+ Condition is_smi = masm_->CheckSmi(value.reg());
+ dest->true_target()->Branch(is_smi);
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
+ value.Unuse();
+ dest->Split(not_zero);
+ }
} else {
// Fast case checks.
// 'false' => false.
@@ -6511,7 +6627,7 @@
void DeferredInlineBinaryOperation::Generate() {
Label done;
if ((op_ == Token::ADD)
- || (op_ ==Token::SUB)
+ || (op_ == Token::SUB)
|| (op_ == Token::MUL)
|| (op_ == Token::DIV)) {
Label call_runtime;
@@ -7530,9 +7646,11 @@
// is not a dictionary.
__ movq(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Factory::fixed_array_map());
- deferred->Branch(not_equal);
+ if (FLAG_debug_code) {
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ Assert(equal, "JSObject with fast elements map has slow elements");
+ }
// Check that key is within bounds.
__ SmiCompare(key.reg(),
@@ -8000,14 +8118,12 @@
__ jmp(&true_result);
__ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- // These three cases set C3 when compared to zero in the FPU.
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &true_result);
- __ fldz(); // Load zero onto fp stack
- // Load heap-number double value onto fp stack
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ FCmp();
+ // HeapNumber => false iff +0, -0, or NaN.
+ // These three cases set the zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ j(zero, &false_result);
// Fall through to |true_result|.
@@ -8609,26 +8725,26 @@
__ bind(&seq_ascii_string);
// rax: subject string (sequential ascii)
// rcx: RegExp data (FixedArray)
- __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ Set(rdi, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
// rax: subject string (flat two-byte)
// rcx: RegExp data (FixedArray)
- __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ Set(rdi, 0); // Type is two byte.
__ bind(&check_code);
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// the hole.
- __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// rax: subject string
// rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r12: code
+ // r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
@@ -8636,7 +8752,7 @@
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r12: code
+ // r11: code
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
@@ -8686,7 +8802,7 @@
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r12: code
+ // r11: code
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -8710,8 +8826,8 @@
__ movq(arg1, rax);
// Locate the code entry and call it.
- __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r12, kRegExpExecuteArguments);
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
// rsi is caller save, as it is used to pass parameter.
__ pop(rsi);
@@ -8925,7 +9041,7 @@
void CompareStub::Generate(MacroAssembler* masm) {
- Label call_builtin, done;
+ Label check_unequal_objects, done;
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -8951,48 +9067,40 @@
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+
if (never_nan_nan_ && (cc_ == equal)) {
__ Set(rax, EQUAL);
__ ret(0);
} else {
- Label return_equal;
Label heap_number;
- // If it's not a heap number, then return equal.
+ // If it's not a heap number, then return equal for (in)equality operator.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map());
__ j(equal, &heap_number);
- __ bind(&return_equal);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical);
+ }
__ Set(rax, EQUAL);
__ ret(0);
__ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only allow QNaNs, which have bit 51 set (which also rules out
- // the value being Infinity).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
- __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ xorl(rax, rax);
- __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
- __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
- if (cc_ == equal) {
- __ setcc(above_equal, rax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan);
- __ Set(rax, EQUAL);
- __ ret(0);
- __ bind(&nan);
- __ Set(rax, NegativeComparisonResult(cc_));
- __ ret(0);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
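+ // ucomisd of a value with itself is unordered (sets the parity flag) only for NaN.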
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
}
+ __ ret(0);
}
__ bind(&not_identical);
@@ -9067,16 +9175,16 @@
Label non_number_comparison;
Label unordered;
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered);
// Return a result of -1, 0, or 1, based on EFLAGS.
- __ movq(rax, Immediate(0)); // equal
- __ movq(rcx, Immediate(1));
- __ cmovq(above, rax, rcx);
- __ movq(rcx, Immediate(-1));
- __ cmovq(below, rax, rcx);
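+ // rax and rcx were zeroed above; setcc writes 1 to the low byte when its
+ // condition holds, so rax - rcx yields 1, 0, or -1.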
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
__ ret(2 * kPointerSize); // rax, rdx were pushed
// If one of the numbers was NaN, then the result is always false.
@@ -9108,7 +9216,8 @@
__ bind(&check_for_strings);
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(
+ rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9123,7 +9232,40 @@
__ Abort("Unexpected fall-through from string comparison");
#endif
- __ bind(&call_builtin);
+ __ bind(&check_unequal_objects);
+ if (cc_ == equal && !strict_) {
+ // Not strict equality. Objects are unequal if
+ // they are both JSObjects and not undetectable,
+ // and their pointers are different.
+ Label not_both_objects, return_unequal;
+ // At most one is a smi, so we can test for smi by adding the two.
+ // A smi plus a heap object has the low bit set, a heap object plus
+ // a heap object has the low bit clear.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
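+ // lea computes rax + rdx without changing flags; testb then checks the low
+ // (smi tag) bit of the sum.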
+ __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ testb(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_both_objects);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, &not_both_objects);
+ __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ j(below, &not_both_objects);
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ // The objects are both undetectable, so they both compare as the value
+ // undefined, and are equal.
+ __ Set(rax, EQUAL);
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in rax,
+ // or return equal if we fell through to here.
+ __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ bind(&not_both_objects);
+ }
+
// must swap argument order
__ pop(rcx);
__ pop(rdx);
@@ -9483,7 +9625,7 @@
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
@@ -9524,7 +9666,7 @@
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
@@ -9540,7 +9682,7 @@
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
+ __ movq(rsi, r12); // argv.
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
@@ -9742,7 +9884,7 @@
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
+ // r12: argv pointer (C callee-saved).
Label throw_normal_exception;
Label throw_termination_exception;
@@ -9802,24 +9944,38 @@
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ Push(Smi::FromInt(marker)); // context slot
- __ Push(Smi::FromInt(marker)); // function slot
- // Save callee-saved registers (X64 calling conventions).
+ // Scratch register is neither callee-save, nor an argument register on any
+ // platform. It's free to use at this point.
+ // Cannot use smi-register for loading yet.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
__ push(r12);
__ push(r13);
__ push(r14);
__ push(r15);
- __ push(rdi);
- __ push(rsi);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
__ push(rbx);
- // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
- // callee-save in JS code as well.
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee save as well.
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
__ load_rax(c_entry_fp);
__ push(rax);
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
@@ -9890,8 +10046,11 @@
// Restore callee-saved registers (X64 conventions).
__ pop(rbx);
+#ifdef _WIN64
+ // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
__ pop(rsi);
__ pop(rdi);
+#endif
__ pop(r15);
__ pop(r14);
__ pop(r13);
@@ -10040,20 +10199,15 @@
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- if (FLAG_debug_code) {
- // Both arguments can not be smis. That case is handled by smi-only code.
- Label ok;
- __ JumpIfNotBothSmi(rax, rdx, &ok);
- __ Abort("Both arguments smi but not handled by smi-code.");
- __ bind(&ok);
- }
// Check float operands.
Label done;
+ Label rax_is_smi;
Label rax_is_object;
Label rdx_is_object;
__ JumpIfNotSmi(rdx, &rdx_is_object);
__ SmiToInteger32(rdx, rdx);
+ __ JumpIfSmi(rax, &rax_is_smi);
__ bind(&rax_is_object);
IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
@@ -10062,6 +10216,7 @@
__ bind(&rdx_is_object);
IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
__ JumpIfNotSmi(rax, &rax_is_object);
+ __ bind(&rax_is_smi);
__ SmiToInteger32(rcx, rax);
__ bind(&done);
@@ -10446,7 +10601,6 @@
Label not_floats;
// rax: y
// rdx: x
- ASSERT(!static_operands_type_.IsSmi());
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
@@ -11130,7 +11284,7 @@
// Check that both strings are non-external ascii strings.
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
+ &string_add_runtime);
// Get the two characters forming the sub string.
__ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -11140,7 +11294,7 @@
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
+ masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
@@ -11232,7 +11386,7 @@
__ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
@@ -11269,7 +11423,7 @@
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
@@ -11583,7 +11737,9 @@
__ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx, NULL); // Overflow doesn't happen.
- __ j(negative, &runtime);
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
+ Label return_rax;
+ __ j(equal, &return_rax);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
@@ -11686,6 +11842,8 @@
// rsi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
__ movq(rsi, rdx); // Restore esi.
+
+ __ bind(&return_rax);
__ IncrementCounter(&Counters::sub_string_native, 1);
__ ret(kArgumentsSize);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index cd03d2a..b9a3b70 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -393,6 +393,9 @@
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
+ // Generate code for a fast smi loop.
+ void GenerateFastSmiLoop(ForStatement* node);
+
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 44ffe5f..06a8c79 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -468,20 +468,20 @@
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
// index == rsp means no index. Only use sib byte with no index for
// rsp and r12 base.
- AppendToBuffer("[%s]", (this->*register_name)(base));
+ AppendToBuffer("[%s]", NameOfCPURegister(base));
return 2;
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
AppendToBuffer("[%s*%d+0x%x]",
- (this->*register_name)(index),
+ NameOfCPURegister(index),
1 << scale, disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
AppendToBuffer("[%s+%s*%d]",
- (this->*register_name)(base),
- (this->*register_name)(index),
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
1 << scale);
return 2;
} else {
@@ -489,7 +489,7 @@
return 1;
}
} else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
+ AppendToBuffer("[%s]", NameOfCPURegister(rm));
return 1;
}
break;
@@ -503,21 +503,21 @@
: *reinterpret_cast<char*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", (this->*register_name)(base), -disp);
+ AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
} else {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(base), disp);
+ AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
}
} else {
if (-disp > 0) {
AppendToBuffer("[%s+%s*%d-0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
1 << scale,
-disp);
} else {
AppendToBuffer("[%s+%s*%d+0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
1 << scale,
disp);
}
@@ -528,9 +528,9 @@
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
: *reinterpret_cast<char*>(modrmp + 1);
if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", (this->*register_name)(rm), -disp);
+ AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
} else {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
}
return (mod == 2) ? 5 : 2;
}
@@ -1028,9 +1028,9 @@
if (opcode == 0x57) {
mnemonic = "xorpd";
} else if (opcode == 0x2E) {
- mnemonic = "comisd";
- } else if (opcode == 0x2F) {
mnemonic = "ucomisd";
+ } else if (opcode == 0x2F) {
+ mnemonic = "comisd";
} else {
UnimplementedInstruction();
}
@@ -1057,7 +1057,7 @@
// CVTSI2SD: integer to XMM double conversion.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightOperand(current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
@@ -1070,7 +1070,25 @@
}
} else if (group_1_prefix_ == 0xF3) {
// Instructions with prefix 0xF3.
- if (opcode == 0x2C) {
+ if (opcode == 0x11 || opcode == 0x10) {
+ // MOVSS: Move scalar single-precision fp to/from/between XMM registers.
+ AppendToBuffer("movss ");
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ if (opcode == 0x11) {
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ }
+ } else if (opcode == 0x2A) {
+ // CVTSI2SS: integer to XMM single conversion.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ } else if (opcode == 0x2C) {
// CVTTSS2SI: Convert scalar single-precision FP to dword integer.
// Assert that mod is not 3, so source is memory, not an XMM register.
ASSERT_NE(0xC0, *current & 0xC0);
@@ -1146,8 +1164,8 @@
switch (opcode) {
case 0x1F:
return "nop";
- case 0x2A: // F2 prefix.
- return "cvtsi2sd";
+ case 0x2A: // F2/F3 prefix.
+ return "cvtsi2s";
case 0x31:
return "rdtsc";
case 0x51: // F2 prefix.
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a92b248..9991981 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -56,7 +56,11 @@
class EntryFrameConstants : public AllStatic {
public:
+#ifdef _WIN64
static const int kCallerFPOffset = -10 * kPointerSize;
+#else
+ static const int kCallerFPOffset = -8 * kPointerSize;
+#endif
static const int kArgvOffset = 6 * kPointerSize;
};
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 1df1de3..e3f74f6 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1518,12 +1518,13 @@
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rcx, rax);
+ __ pop(rdx);
+ __ pop(rax);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
__ nop(); // Signal no inlined code.
- __ Drop(3); // Receiver, key, and extra copy of value.
break;
}
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 6e77c89..d04a7dc 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -45,71 +45,76 @@
#define __ ACCESS_MASM(masm)
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if the receiver has fast properties,
-// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register r0,
- Register r1,
- Register r2,
- Register name,
- Register r4,
- Register result,
- DictionaryCheck check_dictionary) {
- // Register use:
- //
- // r0 - used to hold the property dictionary and is unchanged.
- //
- // r1 - used to hold the receiver and is unchanged.
- //
- // r2 - used to hold the capacity of the property dictionary.
- //
- // name - holds the name of the property and is unchanged.
- //
- // r4 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
+ __ j(equal, global_object);
+ __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
+ __ j(equal, global_object);
+ __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
+ __ j(equal, global_object);
+}
- Label done;
- // Check for the absence of an interceptor.
- // Load the map into r0.
- __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register r0,
+ Register r1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // r0: used to hold receiver instance type.
+ // Holds the property dictionary on fall through.
+ // r1: used to hold the receiver's map.
- // Bail out if the receiver has a named interceptor.
- __ testl(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss_label);
+ __ JumpIfSmi(receiver, miss);
- // Bail out if we have a JS global proxy object.
- __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, miss_label);
+ // Check that the receiver is a valid JS object.
+ __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
+ __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, miss);
- // Possible work-around for http://crbug.com/16276.
- __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, miss_label);
- __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, miss_label);
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- // Load properties array.
- __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+ GenerateGlobalInstanceTypeCheck(masm, r0, miss);
- if (check_dictionary == CHECK_DICTIONARY) {
- // Check that the properties array is a dictionary.
- __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
- __ j(not_equal, miss_label);
- }
+ // Check for non-global object that requires access check.
+ __ testb(FieldOperand(r1, Map::kBitFieldOffset),
+ Immediate((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
+ __ j(not_zero, miss);
+ __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(not_equal, miss);
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1) {
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ SmiToInteger32(r2, FieldOperand(r0, kCapacityOffset));
- __ decl(r2);
+ __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
+ __ decl(r0);
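+ // Dictionary capacities are powers of two, so capacity - 1 serves as the hash mask.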
// Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes
@@ -120,38 +125,153 @@
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r4, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r4, Immediate(String::kHashShift));
+ __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r1, Immediate(String::kHashShift));
if (i > 0) {
- __ addl(r4, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r4, r2);
+ __ and_(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r4, Operand(r4, r4, times_2, 0)); // r4 = r4 * 3
+ __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(r0, r4, times_pointer_size,
+ __ cmpq(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
- __ j(equal, &done);
+ __ j(equal, done);
} else {
- __ j(not_equal, miss_label);
+ __ j(not_equal, miss);
}
}
+}
- // Check that the value is a normal property.
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if name is not a symbol, and will jump to
+// the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is unchanged.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // r0 - used to hold the capacity of the property dictionary.
+ //
+ // r1 - used to hold the index into the property dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
+
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ r0,
+ r1);
+
+ // If probing finds an entry in the dictionary, r1 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property.
__ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(r0, r4, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+ __ Test(Operand(elements, r1, times_pointer_size,
+ kDetailsOffset - kHeapObjectTag),
Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(result,
- Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
+ Operand(elements, r1, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not a symbol, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss_label,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch0,
+ Register scratch1) {
+ // Register use:
+ //
+ // elements - holds the property dictionary on entry and is clobbered.
+ //
+ // name - holds the name of the property on entry and is unchanged.
+ //
+ // value - holds the value to store and is unchanged.
+ //
+ // scratch0 - used to hold the capacity of the property dictionary and is
+ // clobbered.
+ //
+ // scratch1 - used for the index into the property dictionary and is clobbered.
+ Label done;
+
+ // Probe the dictionary.
+ GenerateStringDictionaryProbes(masm,
+ miss_label,
+ &done,
+ elements,
+ name,
+ scratch0,
+ scratch1);
+
+ // If probing finds an entry in the dictionary, scratch1 contains the
+ // index into the dictionary. Check that the value is a normal
+ // property that is not read only.
+ __ bind(&done);
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ Test(Operand(elements,
+ scratch1,
+ times_pointer_size,
+ kDetailsOffset - kHeapObjectTag),
+ Smi::FromInt(kTypeAndReadOnlyMask));
+ __ j(not_zero, miss_label);
+
+ // Store the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lea(scratch1, Operand(elements,
+ scratch1,
+ times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movq(Operand(scratch1, 0), value);
+
+ // Update write barrier. Make sure not to clobber the value.
+ __ movq(scratch0, value);
+ __ RecordWrite(elements, scratch1, scratch0);
}
@@ -327,6 +447,8 @@
// -- rsp[0] : return address
// -----------------------------------
+ __ IncrementCounter(&Counters::keyed_load_miss, 1);
+
__ pop(rbx);
__ push(rdx); // receiver
__ push(rax); // name
@@ -360,6 +482,7 @@
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver,
Register map,
+ int interceptor_bit,
Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
@@ -379,7 +502,8 @@
// Check bit field.
__ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ Immediate((1 << Map::kIsAccessCheckNeeded) |
+ (1 << interceptor_bit)));
__ j(not_zero, slow);
}
@@ -500,14 +624,15 @@
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
- GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, &slow);
-
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
+ GenerateKeyedLoadReceiverCheck(
+ masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
+
GenerateFastArrayLoad(masm,
rdx,
rax,
@@ -557,6 +682,9 @@
__ bind(&check_string);
GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
+ GenerateKeyedLoadReceiverCheck(
+ masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
+
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
__ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
@@ -608,15 +736,13 @@
__ bind(&probe_dictionary);
// rdx: receiver
// rax: key
- GenerateDictionaryLoad(masm,
- &slow,
- rbx,
- rdx,
- rcx,
- rax,
- rdi,
- rax,
- DICTIONARY_CHECK_DONE);
+ // rbx: elements
+
+ __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+ __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
+
+ GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
@@ -672,7 +798,7 @@
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, failed_allocation;
+ Label slow;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow);
@@ -731,7 +857,7 @@
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
- __ fld_s(Operand(rbx, rcx, times_4, 0));
+ __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
@@ -743,20 +869,16 @@
// For integer array types:
// rcx: value
// For floating-point array type:
- // FP(0): value
+ // xmm0: value as double.
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
+ ASSERT(kSmiValueSize == 32);
+ if (array_type == kExternalUnsignedIntArray) {
+ // For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
- if (array_type == kExternalIntArray) {
- __ JumpIfNotValidSmiValue(rcx, &box_int);
- } else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
- }
+
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
@@ -765,42 +887,28 @@
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
- __ push(rcx);
- if (array_type == kExternalIntArray) {
- __ fild_s(Operand(rsp, 0));
- } else {
- ASSERT(array_type == kExternalUnsignedIntArray);
- // The value is zero-extended on the stack, because all pushes are
- // 64-bit and we loaded the value from memory with movl.
- __ fild_d(Operand(rsp, 0));
- }
- __ pop(rcx);
- // FP(0): value
- __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
+ // The value is zero-extended since we loaded the value from memory
+ // with movl.
+ __ cvtqsi2sd(xmm0, rcx);
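+ // Converting from the full 64-bit register keeps uint32 values above 2^31 positive.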
+
+ __ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
+ __ AllocateHeapNumber(rcx, rbx, &slow);
// Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else {
__ Integer32ToSmi(rax, rcx);
__ ret(0);
}
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ ffree();
- __ fincstp();
- // Fall through to slow case.
-
// Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
@@ -1086,10 +1194,8 @@
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
- __ push(rdx);
- __ fild_s(Operand(rsp, 0));
- __ pop(rdx);
- __ fstp_s(Operand(rbx, rdi, times_4, 0));
+ __ cvtlsi2ss(xmm0, rdx);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
default:
UNREACHABLE();
@@ -1110,53 +1216,41 @@
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rdi: untagged index
// rbx: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
- __ fstp_s(Operand(rbx, rdi, times_4, 0));
+ __ cvtsd2ss(xmm0, xmm0);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ ret(0);
} else {
// Need to perform float-to-int conversion.
- // Test the top of the FP stack for NaN.
- Label is_nan;
- __ fucomi(0);
- __ j(parity_even, &is_nan);
+ // Test the value for NaN.
- __ push(rdx); // Make room on the stack. Receiver is no longer needed.
- __ fistp_d(Operand(rsp, 0));
- __ pop(rdx);
+ // Convert to int32 and store the low byte/word.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
// rdx: value (converted to an untagged integer)
// rdi: untagged index
// rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
+ __ cvtsd2si(rdx, xmm0);
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
+ __ cvtsd2si(rdx, xmm0);
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
- // We also need to explicitly check for +/-Infinity. These are
- // converted to MIN_INT, but we need to be careful not to
- // confuse with legal uses of MIN_INT. Since MIN_INT truncated
- // to 8 or 16 bits is zero, we only perform this test when storing
- // 32-bit ints.
- Label not_infinity;
- // This test would apparently detect both NaN and Infinity,
- // but we've already checked for NaN using the FPU hardware
- // above.
- __ movzxwq(rcx, FieldOperand(rax, HeapNumber::kValueOffset + 6));
- __ and_(rcx, Immediate(0x7FF0));
- __ cmpw(rcx, Immediate(0x7FF0));
- __ j(not_equal, &not_infinity);
- __ movq(rdx, Immediate(0));
- __ bind(&not_infinity);
+ // Convert to int64, so that NaN and infinities become
+ // 0x8000000000000000, which is zero mod 2^32.
+ __ cvtsd2siq(rdx, xmm0);
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
}
@@ -1165,31 +1259,6 @@
break;
}
__ ret(0);
-
- __ bind(&is_nan);
- // rdi: untagged index
- // rbx: base pointer of external storage
- __ ffree();
- __ fincstp();
- __ movq(rdx, Immediate(0));
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
}
// Slow case: call runtime.
@@ -1212,6 +1281,13 @@
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
+
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(&Counters::call_miss, 1);
+ } else {
+ __ IncrementCounter(&Counters::keyed_call_miss, 1);
+ }
+
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1233,22 +1309,25 @@
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
+ __ JumpIfSmi(rdx, &invoke);
+ __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
+ __ j(equal, &global);
+ __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
+ __ j(not_equal, &invoke);
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ __ bind(&invoke);
+ }
// Invoke the function.
ParameterCount actual(argc);
- __ bind(&invoke);
__ InvokeFunction(rdi, actual, JUMP_FUNCTION);
}
@@ -1309,13 +1388,12 @@
}
-static void GenerateNormalHelper(MacroAssembler* masm,
- int argc,
- bool is_global_object,
- Label* miss) {
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss) {
// ----------- S t a t e -------------
// rcx : function name
- // rdx : receiver
+ // rdi : function
// rsp[0] : return address
// rsp[8] : argument argc
// rsp[16] : argument argc - 1
@@ -1323,21 +1401,11 @@
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- // Search dictionary - put result in register rdx.
- GenerateDictionaryLoad(
- masm, miss, rax, rdx, rbx, rcx, rdi, rdi, CHECK_DICTIONARY);
-
__ JumpIfSmi(rdi, miss);
// Check that the value is a JavaScript function.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
__ j(not_equal, miss);
- // Patch the receiver with the global proxy if necessary.
- if (is_global_object) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
// Invoke the function.
ParameterCount actual(argc);
__ InvokeFunction(rdi, actual, JUMP_FUNCTION);
@@ -1355,56 +1423,18 @@
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
- Label miss, global_object, non_global_object;
+ Label miss;
// Get the receiver of the function from the stack.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
- // Check that the receiver is a valid JS object.
- // Because there are so many map checks and type checks, do not
- // use CmpObjectType, but load map and type into registers.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ movb(rax, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpb(rax, Immediate(FIRST_JS_OBJECT_TYPE));
- __ j(below, &miss);
+ // rax: elements
+ // Search the dictionary placing the result in rdi.
+ GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object.
- __ cmpb(rax, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, &global_object);
- __ cmpb(rax, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(not_equal, &non_global_object);
-
- // Accessing global object: Load and invoke.
- __ bind(&global_object);
- // Check that the global object does not require access checks.
- __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
- __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_equal, &miss);
- GenerateNormalHelper(masm, argc, true, &miss);
-
- // Accessing non-global object: Check for access to global proxy.
- Label global_proxy, invoke;
- __ bind(&non_global_object);
- __ cmpb(rax, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, &global_proxy);
- // Check that the non-global, non-global-proxy object does not
- // require access checks.
- __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
- __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_equal, &miss);
- __ bind(&invoke);
- GenerateNormalHelper(masm, argc, false, &miss);
-
- // Global object proxy access: Check access rights.
- __ bind(&global_proxy);
- __ CheckAccessGlobalProxy(rdx, rax, &miss);
- __ jmp(&invoke);
+ GenerateFunctionTailCall(masm, argc, &miss);
__ bind(&miss);
}
@@ -1498,7 +1528,8 @@
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &slow_call);
+ GenerateKeyedLoadReceiverCheck(
+ masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
GenerateFastArrayLoad(
masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
@@ -1508,14 +1539,7 @@
// receiver in rdx is not used after this point.
// rcx: key
// rdi: function
-
- // Check that the value in edi is a JavaScript function.
- __ JumpIfSmi(rdi, &slow_call);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ j(not_equal, &slow_call);
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+ GenerateFunctionTailCall(masm, argc, &slow_call);
__ bind(&check_number_dictionary);
// eax: elements
@@ -1523,6 +1547,7 @@
// Check whether the elements is a number dictionary.
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
+ __ j(not_equal, &slow_load);
__ SmiToInteger32(rbx, rcx);
// ebx: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
@@ -1550,15 +1575,15 @@
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &lookup_monomorphic_cache);
+ GenerateKeyedLoadReceiverCheck(
+ masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
__ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &lookup_monomorphic_cache);
- GenerateDictionaryLoad(
- masm, &slow_load, rbx, rdx, rax, rcx, rdi, rdi, DICTIONARY_CHECK_DONE);
+ GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
__ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
__ jmp(&do_call);
@@ -1620,6 +1645,8 @@
// -- rsp[0] : return address
// -----------------------------------
+ __ IncrementCounter(&Counters::load_miss, 1);
+
__ pop(rbx);
__ push(rax); // receiver
__ push(rcx); // name
@@ -1683,38 +1710,15 @@
// -- rcx : name
// -- rsp[0] : return address
// -----------------------------------
- Label miss, probe, global;
+ Label miss;
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rax, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
- // Check that the receiver is a valid JS object.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
- __ j(below, &miss);
-
- // If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
- // Check for access to global object (unlikely).
- __ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, &global);
-
- // Check for non-global object that requires access check.
- __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &miss);
-
+ // rdx: elements
// Search the dictionary placing the result in rax.
- __ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
- rcx, rdi, rax, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
__ ret(0);
- // Global object access: Check access rights.
- __ bind(&global);
- __ CheckAccessGlobalProxy(rax, rdx, &miss);
- __ jmp(&probe);
-
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
@@ -1852,6 +1856,28 @@
}
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Label miss, restore_miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
+
+ GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
+ __ IncrementCounter(&Counters::store_normal_hit, 1);
+ __ ret(0);
+
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::store_normal_miss, 1);
+ GenerateMiss(masm);
+}
+
+
#undef __
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 24bac7d..76200d7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -105,12 +105,6 @@
}
-// For page containing |object| mark region covering [object+offset] dirty.
-// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the index register contains the array index into
-// the elements array represented a zero extended int32. Otherwise it can be
-// used as a scratch register.
-// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@@ -141,6 +135,35 @@
}
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
+ Register value) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are rsi.
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+ JumpIfSmi(value, &done);
+
+ InNewSpace(object, value, equal, &done);
+
+ RecordWriteHelper(object, address, value);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
+}
+
+
void MacroAssembler::RecordWriteNonSmi(Register object,
int offset,
Register scratch,
@@ -351,7 +374,7 @@
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- movq(rax, Immediate(num_arguments));
+ Set(rax, num_arguments);
movq(rbx, ExternalReference(f));
CEntryStub ces(f->result_size);
CallStub(&ces);
@@ -360,7 +383,7 @@
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments) {
- movq(rax, Immediate(num_arguments));
+ Set(rax, num_arguments);
movq(rbx, ext);
CEntryStub stub(1);
@@ -382,7 +405,7 @@
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- movq(rax, Immediate(num_arguments));
+ Set(rax, num_arguments);
JumpToExternalReference(ext, result_size);
}
@@ -444,7 +467,7 @@
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
- xor_(dst, dst);
+ xorl(dst, dst);
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
@@ -454,7 +477,6 @@
}
}
-
void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -469,6 +491,78 @@
static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+Register MacroAssembler::GetSmiConstant(Smi* source) {
+ int value = source->value();
+ if (value == 0) {
+ xorl(kScratchRegister, kScratchRegister);
+ return kScratchRegister;
+ }
+ if (value == 1) {
+ return kSmiConstantRegister;
+ }
+ LoadSmiConstant(kScratchRegister, source);
+ return kScratchRegister;
+}
+
+void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+ if (FLAG_debug_code) {
+ movq(dst,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+ cmpq(dst, kSmiConstantRegister);
+ if (allow_stub_calls()) {
+ Assert(equal, "Uninitialized kSmiConstantRegister");
+ } else {
+ Label ok;
+ j(equal, &ok);
+ int3();
+ bind(&ok);
+ }
+ }
+ if (source->value() == 0) {
+ xorl(dst, dst);
+ return;
+ }
+ int value = source->value();
+ bool negative = value < 0;
+ unsigned int uvalue = negative ? -value : value;
+
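+ // kSmiConstantRegister holds Smi::FromInt(1), so small smi constants can be
+ // synthesized with lea as 2x, 3x, 4x, 5x, 8x or 9x of that register.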
+ switch (uvalue) {
+ case 9:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ break;
+ case 8:
+ xorl(dst, dst);
+ lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ break;
+ case 4:
+ xorl(dst, dst);
+ lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ break;
+ case 5:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ break;
+ case 3:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ break;
+ case 2:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ break;
+ case 1:
+ movq(dst, kSmiConstantRegister);
+ break;
+ case 0:
+ UNREACHABLE();
+ return;
+ default:
+ movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+ return;
+ }
+ if (negative) {
+ neg(dst);
+ }
+}
+
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
@@ -629,9 +723,10 @@
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
+ // Make mask 0x8000000000000001 and test that both bits are zero.
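+ // rol by one rotates the sign bit into bit 0 and the smi tag bit into bit 1,
+ // so a single testb of the two low bits covers both checks.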
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(0x03));
+ testb(kScratchRegister, Immediate(3));
return zero;
}
@@ -640,9 +735,9 @@
if (first.is(second)) {
return CheckSmi(first);
}
- movl(kScratchRegister, first);
- orl(kScratchRegister, second);
- testb(kScratchRegister, Immediate(kSmiTagMask));
+ ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
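+ // Adding the two tag bits gives 0 only when both operands are smis (tags 0+0);
+ // a heap object (tag 1) in either operand leaves the low two bits of the sum nonzero.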
+ leal(kScratchRegister, Operand(first, second, times_1, 0));
+ testb(kScratchRegister, Immediate(0x03));
return zero;
}
@@ -652,15 +747,14 @@
if (first.is(second)) {
return CheckPositiveSmi(first);
}
- movl(kScratchRegister, first);
- orl(kScratchRegister, second);
+ movq(kScratchRegister, first);
+ or_(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(0x03));
return zero;
}
-
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
@@ -673,11 +767,10 @@
Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
- cmpq(kScratchRegister, Immediate(1));
- return equal;
+ ASSERT(!src.is(kScratchRegister));
+ // If we overflow by subtracting one, it's the minimal smi value.
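+ // kSmiConstantRegister holds Smi::FromInt(1); cmpq computes src - Smi(1) and
+ // signed overflow occurs only for the most negative smi.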
+ cmpq(src, kSmiConstantRegister);
+ return overflow;
}
@@ -690,8 +783,8 @@
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
// An unsigned 32-bit integer value is valid as long as the high bit
// is not set.
- testq(src, Immediate(0x80000000));
- return zero;
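+  // testl sets the sign flag from bit 31, so 'positive' means the high bit
+  // of the 32-bit value is clear.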
+ testl(src, src);
+ return positive;
}
@@ -784,10 +877,10 @@
}
Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- subq(kScratchRegister, src2);
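+    // cmpq computes src1 - src2 without modifying src1, so src1 still holds
+    // its original value if we jump to on_not_smi_result.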
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
j(overflow, on_not_smi_result);
- movq(src1, kScratchRegister);
+ subq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
@@ -860,7 +953,7 @@
JumpIfNotSmi(src, on_not_smi_result);
Register tmp = (dst.is(src) ? kScratchRegister : dst);
- Move(tmp, constant);
+ LoadSmiConstant(tmp, constant);
addq(tmp, src);
j(overflow, on_not_smi_result);
if (dst.is(src)) {
@@ -874,14 +967,46 @@
if (!dst.is(src)) {
movq(dst, src);
}
+ return;
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- addq(dst, kScratchRegister);
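+    // Constants 1, 2, 4 and 8 are synthesized from the smi 1 in
+    // kSmiConstantRegister; other constants go through GetSmiConstant.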
+ switch (constant->value()) {
+ case 1:
+ addq(dst, kSmiConstantRegister);
+ return;
+ case 2:
+ lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ return;
+ case 4:
+ lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ return;
+ case 8:
+ lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ return;
+ default:
+ Register constant_reg = GetSmiConstant(constant);
+ addq(dst, constant_reg);
+ return;
+ }
} else {
- Move(dst, constant);
- addq(dst, src);
+ switch (constant->value()) {
+ case 1:
+ lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ return;
+ case 2:
+ lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ return;
+ case 4:
+ lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ return;
+ case 8:
+ lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ return;
+ default:
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ return;
+ }
}
}
@@ -904,12 +1029,12 @@
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- addq(kScratchRegister, dst);
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
addq(dst, src);
j(overflow, on_not_smi_result);
}
@@ -923,19 +1048,17 @@
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ subq(dst, constant_reg);
} else {
- // Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
- Move(dst, Smi::FromInt(-constant->value()));
+ LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
addq(dst, src);
}
}
@@ -957,11 +1080,11 @@
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
- Move(kScratchRegister, constant);
+ LoadSmiConstant(kScratchRegister, constant);
subq(dst, kScratchRegister);
} else {
// Subtract by adding the negation.
- Move(kScratchRegister, Smi::FromInt(-constant->value()));
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
addq(kScratchRegister, dst);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
@@ -972,13 +1095,13 @@
// We test the non-negativeness before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
- Move(dst, Smi::FromInt(-(constant->value())));
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
j(overflow, on_not_smi_result);
}
@@ -1132,10 +1255,10 @@
xor_(dst, dst);
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- and_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ and_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
and_(dst, src);
}
}
@@ -1152,10 +1275,10 @@
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- or_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ or_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
or_(dst, src);
}
}
@@ -1172,10 +1295,10 @@
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- xor_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ xor_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
xor_(dst, src);
}
}
@@ -1343,6 +1466,7 @@
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
+
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
@@ -1568,8 +1692,8 @@
if (is_int32(smi)) {
push(Immediate(static_cast<int32_t>(smi)));
} else {
- Set(kScratchRegister, smi);
- push(kScratchRegister);
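+    // Smi values that do not fit a 32-bit immediate are pushed from a
+    // register; GetSmiConstant reuses kSmiConstantRegister for smi 1.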
+ Register constant = GetSmiConstant(source);
+ push(constant);
}
}
@@ -1678,8 +1802,7 @@
void MacroAssembler::FCmp() {
fucomip();
- ffree(0);
- fincstp();
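+  // fucomip already popped one operand; fstp(0) discards the other in a
+  // single instruction, replacing the ffree/fincstp pair.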
+ fstp(0);
}
@@ -1938,7 +2061,7 @@
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- movq(rax, Immediate(actual.immediate()));
+ Set(rax, actual.immediate());
if (expected.immediate() ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for built-ins that
@@ -1947,7 +2070,7 @@
// arguments.
definitely_matches = true;
} else {
- movq(rbx, Immediate(expected.immediate()));
+ Set(rbx, expected.immediate());
}
}
} else {
@@ -1958,7 +2081,7 @@
cmpq(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke);
ASSERT(expected.reg().is(rbx));
- movq(rax, Immediate(actual.immediate()));
+ Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
@@ -2110,10 +2233,10 @@
movq(rax, rsi);
store_rax(context_address);
- // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // Set up argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ lea(r12, Operand(rbp, r14, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
@@ -2159,7 +2282,7 @@
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Registers:
- // r15 : argv
+ // r12 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
@@ -2179,7 +2302,7 @@
// Pop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ lea(rsp, Operand(r12, 1 * kPointerSize));
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index bb0b681..a256ab8 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -47,8 +47,11 @@
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
-static const Register kScratchRegister = { 10 }; // r10.
-static const Register kRootRegister = { 13 }; // r13
+static const Register kScratchRegister = { 10 }; // r10.
+static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
+static const Register kRootRegister = { 13 }; // r13 (callee save).
+// Value of smi in kSmiConstantRegister.
+static const int kSmiConstantRegisterValue = 1;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@@ -93,16 +96,27 @@
Condition cc,
Label* branch);
- // For page containing |object| mark region covering [object+offset] dirty.
- // object is the object being stored into, value is the object being stored.
- // If offset is zero, then the scratch register contains the array index into
- // the elements array represented as a Smi.
- // All registers are clobbered by the operation.
+ // For page containing |object| mark region covering [object+offset]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. If |offset| is zero, then the |scratch|
+ // register contains the array index into the elements array
+ // represented as a Smi. All registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update the
+ // write barrier if the value is a smi.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
+ // For page containing |object| mark region covering [address]
+ // dirty. |object| is the object being stored into, |value| is the
+ // object being stored. All registers are clobbered by the
+ // operation. RecordWrite filters out smis so it does not update
+ // the write barrier if the value is a smi.
+ void RecordWrite(Register object,
+ Register address,
+ Register value);
+
// For page containing |object| mark region covering [object+offset] dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
@@ -191,6 +205,12 @@
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
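+  // Load Smi::FromInt(kSmiConstantRegisterValue) into kSmiConstantRegister;
+  // GetSmiConstant and LoadSmiConstant expect the register to hold this value.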
+ void InitializeSmiConstantRegister() {
+ movq(kSmiConstantRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+ }
+
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
@@ -458,11 +478,12 @@
// Basic Smi operations.
void Move(Register dst, Smi* source) {
- Set(dst, reinterpret_cast<int64_t>(source));
+ LoadSmiConstant(dst, source);
}
void Move(const Operand& dst, Smi* source) {
- Set(dst, reinterpret_cast<int64_t>(source));
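+    // A 64-bit smi constant cannot be encoded as an immediate in a move to
+    // memory, so it is routed through a register.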
+ Register constant = GetSmiConstant(source);
+ movq(dst, constant);
}
void Push(Smi* smi);
@@ -546,7 +567,8 @@
Register map,
Register instance_type);
- // FCmp is similar to integer cmp, but requires unsigned
+ // FCmp compares and pops the two values on top of the FPU stack.
+  // The flag results are similar to integer cmp, but require unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
@@ -808,6 +830,14 @@
private:
bool generating_stub_;
bool allow_stub_calls_;
+
+ // Returns a register holding the smi value. The register MUST NOT be
+ // modified. It may be the "smi 1 constant" register.
+ Register GetSmiConstant(Smi* value);
+
+ // Moves the smi value to the destination register.
+ void LoadSmiConstant(Register dst, Smi* value);
+
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index c7c18b3..c6bea3a 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -38,7 +38,8 @@
bool RegisterAllocator::IsReserved(Register reg) {
return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
- reg.is(kScratchRegister) || reg.is(kRootRegister);
+ reg.is(kScratchRegister) || reg.is(kRootRegister) ||
+ reg.is(kSmiConstantRegister);
}
@@ -58,11 +59,11 @@
5, // r8
6, // r9
-1, // r10 Scratch register.
- 9, // r11
- 10, // r12
+ 8, // r11
+ 9, // r12
-1, // r13 Roots array. This is callee saved.
7, // r14
- 8 // r15
+ -1 // r15 Smi constant register.
};
return kNumbers[reg.code()];
}
@@ -71,7 +72,7 @@
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
+ { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
return kRegisters[num];
}
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
index 8d666d2..a2884d9 100644
--- a/src/x64/register-allocator-x64.h
+++ b/src/x64/register-allocator-x64.h
@@ -33,7 +33,7 @@
class RegisterAllocatorConstants : public AllStatic {
public:
- static const int kNumRegisters = 11;
+ static const int kNumRegisters = 10;
static const int kInvalidRegister = -1;
};
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 1e103ac..ab75b96 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2125,7 +2125,8 @@
Register scratch,
String* name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ Register extra) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object,
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index e65378d..3a0b72f 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -115,25 +115,45 @@
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- if (count == 1) {
- __ Push(undefined);
- } else if (count < kLocalVarBound) {
- // For less locals the unrolled loop is more compact.
- __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ if (count < kLocalVarBound) {
+ // For fewer locals the unrolled loop is more compact.
+
+ // Hope for one of the first eight registers, where the push operation
+    // takes only one byte (kScratchRegister needs a REX prefix).
+ Result tmp = cgen()->allocator()->Allocate();
+ ASSERT(tmp.is_valid());
+ __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
for (int i = 0; i < count; i++) {
- __ push(kScratchRegister);
+ __ push(tmp.reg());
}
} else {
// For more locals a loop in generated code is more compact.
Label alloc_locals_loop;
Result cnt = cgen()->allocator()->Allocate();
ASSERT(cnt.is_valid());
- __ movq(cnt.reg(), Immediate(count));
__ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+#ifdef DEBUG
+ Label loop_size;
+ __ bind(&loop_size);
+#endif
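+    // The loop emitted below must stay under kLocalVarBound bytes, otherwise
+    // the unrolled sequence of one-byte pushes would be shorter; the CHECK
+    // at the end verifies this.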
+ if (is_uint8(count)) {
+ // Loading imm8 is shorter than loading imm32.
+      // Only the low byte of the register is loaded, to match the decb below.
+ __ movb(cnt.reg(), Immediate(count));
+ } else {
+ __ movl(cnt.reg(), Immediate(count));
+ }
__ bind(&alloc_locals_loop);
__ push(kScratchRegister);
- __ decl(cnt.reg());
+ if (is_uint8(count)) {
+ __ decb(cnt.reg());
+ } else {
+ __ decl(cnt.reg());
+ }
__ j(not_zero, &alloc_locals_loop);
+#ifdef DEBUG
+ CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
+#endif
}
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
@@ -1201,7 +1221,7 @@
// call trampolines per different arguments counts encountered.
Result num_args = cgen()->allocator()->Allocate(rax);
ASSERT(num_args.is_valid());
- __ movq(num_args.reg(), Immediate(arg_count));
+ __ Set(num_args.reg(), arg_count);
function.Unuse();
num_args.Unuse();
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index dc270fe..adf47e2 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -200,7 +200,7 @@
inline void PrepareForReturn();
// Number of local variables after which we use a loop for allocating.
- static const int kLocalVarBound = 7;
+ static const int kLocalVarBound = 14;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@@ -388,6 +388,13 @@
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); }
+ // Duplicate the n'th element from the top of the frame.
+ // Dup(1) is equivalent to Dup().
+ void Dup(int n) {
+ ASSERT(n > 0);
+ PushFrameSlotAt(element_count() - n);
+ }
+
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();