Update V8 to r3121 as required for WebKit update.
Change-Id: Ic53e0aef9a9eb9b71ee7d25a8aef61520bba899c
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 9a5352b..5fa75ec 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -52,7 +52,7 @@
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // relocate entry
- } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+ } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
@@ -85,19 +85,25 @@
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return *reinterpret_cast<Object**>(pc_);
+ return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(pc_);
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
+ return &Memory::Object_at(pc_);
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- *reinterpret_cast<Object**>(pc_) = target;
+ Memory::Object_at(pc_) = target;
}
@@ -108,36 +114,36 @@
Address RelocInfo::call_address() {
- ASSERT(IsCallInstruction());
+ ASSERT(IsPatchedReturnSequence());
return Assembler::target_address_at(pc_ + 1);
}
void RelocInfo::set_call_address(Address target) {
- ASSERT(IsCallInstruction());
+ ASSERT(IsPatchedReturnSequence());
Assembler::set_target_address_at(pc_ + 1, target);
}
Object* RelocInfo::call_object() {
- ASSERT(IsCallInstruction());
+ ASSERT(IsPatchedReturnSequence());
return *call_object_address();
}
Object** RelocInfo::call_object_address() {
- ASSERT(IsCallInstruction());
+ ASSERT(IsPatchedReturnSequence());
return reinterpret_cast<Object**>(pc_ + 1);
}
void RelocInfo::set_call_object(Object* target) {
- ASSERT(IsCallInstruction());
+ ASSERT(IsPatchedReturnSequence());
*call_object_address() = target;
}
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
return *pc_ == 0xE8;
}
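
Note: the rewritten accessors above replace raw reinterpret_casts with the
Memory helpers, which are typed views over an Address. A minimal sketch of
what those helpers look like, assuming the definitions in src/memory.h behave
as typed lvalue accessors (illustrative, not the authoritative code):

    class Memory {
     public:
      static Object*& Object_at(Address addr) {
        return *reinterpret_cast<Object**>(addr);
      }
      static Handle<Object>& Object_Handle_at(Address addr) {
        return *reinterpret_cast<Handle<Object>*>(addr);
      }
    };

Returning references is what lets the same helper serve both the read in
target_object() and the write in set_target_object().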
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index b8dda17..698377a 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1166,6 +1166,19 @@
}
+void Assembler::subb(const Operand& op, int8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (op.is_reg(eax)) {
+ EMIT(0x2c);
+ } else {
+ EMIT(0x80);
+ emit_operand(ebp, op); // ebp == 5
+ }
+ EMIT(imm8);
+}
+
+
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1837,6 +1850,22 @@
}
+void Assembler::fucomi(int i) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDB);
+ EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDF);
+ EMIT(0xE9);
+}
+
+
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2096,7 +2125,7 @@
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > Heap::OldGenerationSize())) {
+ (desc.buffer_size > Heap::MaxOldGenerationSize())) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
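
For reference, the new emitters follow the standard IA-32 encodings. A hedged
summary of the bytes they produce, per the Intel opcode map:

    // subb(eax, imm8)   -> 2C ib     SUB AL, imm8 (short form)
    // subb(r/m8, imm8)  -> 80 /5 ib  /5 opcode extension, which is why
    //                                emit_operand() is passed ebp (reg 5)
    // fucomi(i)         -> DB E8+i   FUCOMI st, st(i)
    // fucomip()         -> DF E9     FUCOMIP st, st(1)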
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 610017b..4d9f08b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -367,6 +367,10 @@
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(Feature f) {
+ if (f == SSE2 && !FLAG_enable_sse2) return false;
+ if (f == SSE3 && !FLAG_enable_sse3) return false;
+ if (f == CMOV && !FLAG_enable_cmov) return false;
+ if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
@@ -590,6 +594,7 @@
void shr(Register dst);
void shr_cl(Register dst);
+ void subb(const Operand& dst, int8_t imm8);
void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
@@ -697,6 +702,8 @@
void ftst();
void fucomp(int i);
void fucompp();
+ void fucomi(int i);
+ void fucomip();
void fcompp();
void fnstsw_ax();
void fwait();
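
The new checks in IsSupported() let the --enable-sse2, --enable-sse3,
--enable-cmov and --enable-rdtsc flags veto features the CPU reports, which is
useful for exercising the FPU fallback paths. A typical call site looks
roughly like this (a sketch, assuming the existing CpuFeatures::Scope
mechanism for marking a feature as in use):

    if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
      CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
      __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
    } else {
      __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
    }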
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 0e314b9..a339e90 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -697,18 +697,6 @@
}
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
@@ -773,13 +761,6 @@
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
- // Allocate a heap number in new space with undefined value.
- // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
- static void AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch1,
- Register scratch2,
- Register result);
};
@@ -824,10 +805,8 @@
void DeferredInlineBinaryOperation::Generate() {
- __ push(left_);
- __ push(right_);
- GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
- __ CallStub(&stub);
+ GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -856,16 +835,16 @@
// Bit operations always assume they likely operate on Smis. Still only
// generate the inline Smi check code if this operation is part of a loop.
flags = (loop_nesting() > 0)
- ? SMI_CODE_INLINED
- : SMI_CODE_IN_STUB;
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
break;
default:
// By default only inline the Smi check code for likely smis if this
// operation is part of a loop.
flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? SMI_CODE_INLINED
- : SMI_CODE_IN_STUB;
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
break;
}
@@ -924,16 +903,15 @@
return;
}
- if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+ if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
frame_->Push(&left);
frame_->Push(&right);
// If we know the arguments aren't smis, use the binary operation stub
// that does not check for the fast smi case.
- // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
if (generate_no_smi_code) {
- flags = SMI_CODE_INLINED;
+ flags = NO_SMI_CODE_IN_STUB;
}
GenericBinaryOpStub stub(op, overwrite_mode, flags);
Result answer = frame_->CallStub(&stub, 2);
@@ -1376,14 +1354,12 @@
void DeferredInlineSmiOperation::Generate() {
- __ push(src_);
- __ push(Immediate(value_));
// For mod we don't generate all the Smi code inline.
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
- (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
- __ CallStub(&stub);
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, src_, value_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1417,10 +1393,8 @@
void DeferredInlineSmiOperationReversed::Generate() {
- __ push(Immediate(value_));
- __ push(src_);
- GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, src_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1449,10 +1423,8 @@
void DeferredInlineSmiAdd::Generate() {
// Undo the optimistic add operation and call the shared stub.
__ sub(Operand(dst_), Immediate(value_));
- __ push(dst_);
- __ push(Immediate(value_));
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1481,10 +1453,8 @@
void DeferredInlineSmiAddReversed::Generate() {
// Undo the optimistic add operation and call the shared stub.
__ sub(Operand(dst_), Immediate(value_));
- __ push(Immediate(value_));
- __ push(dst_);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, dst_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -1514,10 +1484,8 @@
void DeferredInlineSmiSub::Generate() {
// Undo the optimistic sub operation and call the shared stub.
__ add(Operand(dst_), Immediate(value_));
- __ push(dst_);
- __ push(Immediate(value_));
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
@@ -2295,8 +2263,8 @@
// allow us to push the arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
+ frame_->EmitPush(esi); // The context is the first argument.
frame_->EmitPush(Immediate(pairs));
- frame_->EmitPush(esi); // The context is the second argument.
frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
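
The reorder matters because the runtime function reads its arguments
positionally; after this change the call is, in argument order:

    // Runtime::kDeclareGlobals(context, pairs, is_eval_smi)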
@@ -2711,288 +2679,332 @@
}
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
ASSERT(!in_spilled_code());
- Comment cmnt(masm_, "[ LoopStatement");
+ Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
+ IncrementLoopNesting();
- // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
- // known result for the test expression, with no side effects.
- enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
- if (node->cond() == NULL) {
- ASSERT(node->type() == LoopStatement::FOR_LOOP);
- info = ALWAYS_TRUE;
- } else {
- Literal* lit = node->cond()->AsLiteral();
- if (lit != NULL) {
- if (lit->IsTrue()) {
- info = ALWAYS_TRUE;
- } else if (lit->IsFalse()) {
- info = ALWAYS_FALSE;
- }
- }
+ ConditionAnalysis info = AnalyzeCondition(node->cond());
+ // Label the top of the loop for the backward jump if necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // Use the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ break;
+ case ALWAYS_FALSE:
+ // No need to label it.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ break;
+ case DONT_KNOW:
+ // Continue is the test, so use the backward body target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ body.Bind();
+ break;
}
- switch (node->type()) {
- case LoopStatement::DO_LOOP: {
- JumpTarget body(JumpTarget::BIDIRECTIONAL);
- IncrementLoopNesting();
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
- // Label the top of the loop for the backward jump if necessary.
- if (info == ALWAYS_TRUE) {
- // Use the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else if (info == ALWAYS_FALSE) {
- // No need to label it.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Continue is the test, so use the backward body target.
- ASSERT(info == DONT_KNOW);
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- body.Bind();
+ // Compile the test.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // If control flow can fall off the end of the body, jump back to
+ // the top and bind the break target at the exit.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
}
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Compile the test.
- if (info == ALWAYS_TRUE) {
- // If control flow can fall off the end of the body, jump back
- // to the top and bind the break target at the exit.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
-
- } else if (info == ALWAYS_FALSE) {
- // We may have had continues or breaks in the body.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
-
- } else {
- ASSERT(info == DONT_KNOW);
- // We have to compile the test expression if it can be reached by
- // control flow falling out of the body or via continue.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
- }
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
- }
- }
- break;
- }
-
- case LoopStatement::WHILE_LOOP: {
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function
- // literal twice.
- bool test_at_bottom = !node->may_have_function_literal();
-
- IncrementLoopNesting();
-
- // If the condition is always false and has no side effects, we
- // do not need to compile anything.
- if (info == ALWAYS_FALSE) break;
-
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
- }
-
- // Based on the condition analysis, compile the test as necessary.
- if (info == ALWAYS_TRUE) {
- // We will not compile the test expression. Label the top of
- // the loop with the continue target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
- if (test_at_bottom) {
- // Continue is the test at the bottom, no need to label the
- // test at the top. The body is a backward target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else {
- // Label the test at the top as the continue target. The
- // body is a forward-only target.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- }
- // Compile the test with the body as the true target and
- // preferred fall-through and with the break target as the
- // false target.
- ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may
- // have been unconditionally false (if there are no jumps to
- // the body).
- if (!body.is_linked()) break;
-
- // Otherwise, jump around the body on the fall through and
- // then bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
- }
-
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
-
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- if (info == ALWAYS_TRUE) {
- // The loop body has been labeled with the continue target.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- } else {
- ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
- if (test_at_bottom) {
- // If we have chosen to recompile the test at the bottom,
- // then it is the continue target.
- if (node->continue_target()->is_linked()) {
- node->continue_target()->Bind();
- }
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a backward
- // jump from here and thus an invalid fall-through).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
- }
- } else {
- // If we have chosen not to recompile the test at the
- // bottom, jump back to the one at the top.
- if (has_valid_frame()) {
- node->continue_target()->Jump();
- }
- }
- }
-
- // The break target may be already bound (by the condition), or
- // there may not be a valid frame. Bind it only if needed.
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
break;
- }
-
- case LoopStatement::FOR_LOOP: {
- // Do not duplicate conditions that may have function literal
- // subexpressions. This can cause us to compile the function
- // literal twice.
- bool test_at_bottom = !node->may_have_function_literal();
-
- // Compile the init expression if present.
- if (node->init() != NULL) {
- Visit(node->init());
+ case ALWAYS_FALSE:
+ // We may have had continues or breaks in the body.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
}
-
- IncrementLoopNesting();
-
- // If the condition is always false and has no side effects, we
- // do not need to compile anything else.
- if (info == ALWAYS_FALSE) break;
-
- // Target for backward edge if no test at the bottom, otherwise
- // unused.
- JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
- // Target for backward edge if there is a test at the bottom,
- // otherwise used as target for test at the top.
- JumpTarget body;
- if (test_at_bottom) {
- body.set_direction(JumpTarget::BIDIRECTIONAL);
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
}
-
- // Based on the condition analysis, compile the test as necessary.
- if (info == ALWAYS_TRUE) {
- // We will not compile the test expression. Label the top of
- // the loop.
- if (node->next() == NULL) {
- // Use the continue target if there is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // Otherwise use the backward loop target.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
- } else {
- ASSERT(info == DONT_KNOW);
- if (test_at_bottom) {
- // Continue is either the update expression or the test at
- // the bottom, no need to label the test at the top.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- } else if (node->next() == NULL) {
- // We are not recompiling the test at the bottom and there
- // is no update expression.
- node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
- node->continue_target()->Bind();
- } else {
- // We are not recompiling the test at the bottom and there
- // is an update expression.
- node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
- loop.Bind();
- }
-
- // Compile the test with the body as the true target and
- // preferred fall-through and with the break target as the
- // false target.
- ControlDestination dest(&body, node->break_target(), true);
+ break;
+ case DONT_KNOW:
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ ControlDestination dest(&body, node->break_target(), false);
LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
- if (dest.false_was_fall_through()) {
- // If we got the break target as fall-through, the test may
- // have been unconditionally false (if there are no jumps to
- // the body).
- if (!body.is_linked()) break;
-
- // Otherwise, jump around the body on the fall through and
- // then bind the body target.
- node->break_target()->Unuse();
- node->break_target()->Jump();
- body.Bind();
- }
}
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ break;
+ }
- CheckStack(); // TODO(1222600): ignore if body contains calls.
- Visit(node->body());
+ DecrementLoopNesting();
+}
- // If there is an update expression, compile it if necessary.
- if (node->next() != NULL) {
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ WhileStatement");
+ CodeForStatementPosition(node);
+
+ // If the condition is always false and has no side effects, we do not
+ // need to compile anything.
+ ConditionAnalysis info = AnalyzeCondition(node->cond());
+ if (info == ALWAYS_FALSE) return;
+
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function literal
+ // twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ IncrementLoopNesting();
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
+
+ // Based on the condition analysis, compile the test as necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // We will not compile the test expression. Label the top of the
+ // loop with the continue target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ break;
+ case DONT_KNOW: {
+ if (test_at_bottom) {
+ // Continue is the test at the bottom, no need to label the test
+ // at the top. The body is a backward target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ // Label the test at the top as the continue target. The body
+ // is a forward-only target.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ }
+ // Compile the test with the body as the true target and preferred
+ // fall-through and with the break target as the false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may have
+ // been unconditionally false (if there are no jumps to the
+ // body).
+ if (!body.is_linked()) {
+ DecrementLoopNesting();
+ return;
+ }
+
+ // Otherwise, jump around the body on the fall through and then
+ // bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ break;
+ }
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // The loop body has been labeled with the continue target.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ break;
+ case DONT_KNOW:
+ if (test_at_bottom) {
+ // If we have chosen to recompile the test at the bottom, then
+ // it is the continue target.
if (node->continue_target()->is_linked()) {
node->continue_target()->Bind();
}
-
- // Control can reach the update by falling out of the body or
- // by a continue.
if (has_valid_frame()) {
- // Record the source position of the statement as this code
- // which is after the code for the body actually belongs to
- // the loop statement and not the body.
- CodeForStatementPosition(node);
- Visit(node->next());
+ // The break target is the fall-through (body is a backward
+ // jump from here and thus an invalid fall-through).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ } else {
+ // If we have chosen not to recompile the test at the bottom,
+ // jump back to the one at the top.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
}
}
+ break;
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
+ }
- // Based on the condition analysis, compile the backward jump as
- // necessary.
- if (info == ALWAYS_TRUE) {
+ // The break target may be already bound (by the condition), or there
+ // may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ DecrementLoopNesting();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+ ASSERT(!in_spilled_code());
+ Comment cmnt(masm_, "[ ForStatement");
+ CodeForStatementPosition(node);
+
+ // Compile the init expression if present.
+ if (node->init() != NULL) {
+ Visit(node->init());
+ }
+
+ // If the condition is always false and has no side effects, we do not
+ // need to compile anything else.
+ ConditionAnalysis info = AnalyzeCondition(node->cond());
+ if (info == ALWAYS_FALSE) return;
+
+ // Do not duplicate conditions that may have function literal
+ // subexpressions. This can cause us to compile the function literal
+ // twice.
+ bool test_at_bottom = !node->may_have_function_literal();
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ IncrementLoopNesting();
+
+ // Target for backward edge if no test at the bottom, otherwise
+ // unused.
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+ // Target for backward edge if there is a test at the bottom,
+ // otherwise used as target for test at the top.
+ JumpTarget body;
+ if (test_at_bottom) {
+ body.set_direction(JumpTarget::BIDIRECTIONAL);
+ }
+
+ // Based on the condition analysis, compile the test as necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ // We will not compile the test expression. Label the top of the
+ // loop.
+ if (node->next() == NULL) {
+ // Use the continue target if there is no update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // Otherwise use the backward loop target.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+ break;
+ case DONT_KNOW: {
+ if (test_at_bottom) {
+ // Continue is either the update expression or the test at the
+ // bottom, no need to label the test at the top.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else if (node->next() == NULL) {
+ // We are not recompiling the test at the bottom and there is no
+ // update expression.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ // We are not recompiling the test at the bottom and there is an
+ // update expression.
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+ // Compile the test with the body as the true target and preferred
+ // fall-through and with the break target as the false target.
+ ControlDestination dest(&body, node->break_target(), true);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+ if (dest.false_was_fall_through()) {
+ // If we got the break target as fall-through, the test may have
+ // been unconditionally false (if there are no jumps to the
+ // body).
+ if (!body.is_linked()) {
+ DecrementLoopNesting();
+ return;
+ }
+
+ // Otherwise, jump around the body on the fall through and then
+ // bind the body target.
+ node->break_target()->Unuse();
+ node->break_target()->Jump();
+ body.Bind();
+ }
+ break;
+ }
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ Visit(node->body());
+
+ // If there is an update expression, compile it if necessary.
+ if (node->next() != NULL) {
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+
+ // Control can reach the update by falling out of the body or by a
+ // continue.
+ if (has_valid_frame()) {
+ // Record the source position of the statement as this code which
+ // is after the code for the body actually belongs to the loop
+ // statement and not the body.
+ CodeForStatementPosition(node);
+ Visit(node->next());
+ }
+ }
+
+ // Based on the condition analysis, compile the backward jump as
+ // necessary.
+ switch (info) {
+ case ALWAYS_TRUE:
+ if (has_valid_frame()) {
+ if (node->next() == NULL) {
+ node->continue_target()->Jump();
+ } else {
+ loop.Jump();
+ }
+ }
+ break;
+ case DONT_KNOW:
+ if (test_at_bottom) {
+ if (node->continue_target()->is_linked()) {
+ // We can have dangling jumps to the continue target if there
+ // was no update expression.
+ node->continue_target()->Bind();
+ }
+ // Control can reach the test at the bottom by falling out of
+ // the body, by a continue in the body, or from the update
+ // expression.
+ if (has_valid_frame()) {
+ // The break target is the fall-through (body is a backward
+ // jump from here).
+ ControlDestination dest(&body, node->break_target(), false);
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ }
+ } else {
+ // Otherwise, jump back to the test at the top.
if (has_valid_frame()) {
if (node->next() == NULL) {
node->continue_target()->Jump();
@@ -3000,47 +3012,19 @@
loop.Jump();
}
}
- } else {
- ASSERT(info == DONT_KNOW); // ALWAYS_FALSE cannot reach here.
- if (test_at_bottom) {
- if (node->continue_target()->is_linked()) {
- // We can have dangling jumps to the continue target if
- // there was no update expression.
- node->continue_target()->Bind();
- }
- // Control can reach the test at the bottom by falling out
- // of the body, by a continue in the body, or from the
- // update expression.
- if (has_valid_frame()) {
- // The break target is the fall-through (body is a
- // backward jump from here).
- ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
- }
- } else {
- // Otherwise, jump back to the test at the top.
- if (has_valid_frame()) {
- if (node->next() == NULL) {
- node->continue_target()->Jump();
- } else {
- loop.Jump();
- }
- }
- }
- }
-
- // The break target may be already bound (by the condition), or
- // there may not be a valid frame. Bind it only if needed.
- if (node->break_target()->is_linked()) {
- node->break_target()->Bind();
}
break;
- }
+ case ALWAYS_FALSE:
+ UNREACHABLE();
+ break;
}
+ // The break target may be already bound (by the condition), or
+ // there may not be a valid frame. Bind it only if needed.
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
DecrementLoopNesting();
- node->continue_target()->Unuse();
- node->break_target()->Unuse();
}
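
All three loop visitors now share the condition analysis that the removed code
performed inline. A sketch of the helper, reconstructed from that inline logic
(AnalyzeCondition is declared in codegen-ia32.h later in this patch):

    CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
        Expression* cond) {
      if (cond == NULL) return ALWAYS_TRUE;  // e.g. 'for (;;)' has no test.
      Literal* lit = cond->AsLiteral();
      if (lit == NULL) return DONT_KNOW;
      if (lit->IsTrue()) return ALWAYS_TRUE;
      if (lit->IsFalse()) return ALWAYS_FALSE;
      return DONT_KNOW;
    }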
@@ -3234,10 +3218,10 @@
}
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryCatch");
+ Comment cmnt(masm_, "[ TryCatchStatement");
CodeForStatementPosition(node);
JumpTarget try_block;
@@ -3370,10 +3354,10 @@
}
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
ASSERT(!in_spilled_code());
VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ TryFinally");
+ Comment cmnt(masm_, "[ TryFinallyStatement");
CodeForStatementPosition(node);
// State: Used to keep track of reason for entering the finally
@@ -3580,11 +3564,9 @@
ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1);
- // Push the boilerplate on the stack.
- frame_->EmitPush(Immediate(boilerplate));
-
// Create a new closure.
frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
@@ -5172,11 +5154,10 @@
Result scratch1 = allocator()->Allocate();
Result scratch2 = allocator()->Allocate();
Result heap_number = allocator()->Allocate();
- FloatingPointHelper::AllocateHeapNumber(masm_,
- call_runtime.entry_label(),
- scratch1.reg(),
- scratch2.reg(),
- heap_number.reg());
+ __ AllocateHeapNumber(heap_number.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ call_runtime.entry_label());
scratch1.Unuse();
scratch2.Unuse();
@@ -5338,8 +5319,8 @@
switch (op) {
case Token::SUB: {
bool overwrite =
- (node->AsBinaryOperation() != NULL &&
- node->AsBinaryOperation()->ResultOverwriteAllowed());
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
@@ -6505,11 +6486,7 @@
__ j(not_equal, &true_result);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fucompp();
- __ push(eax);
- __ fnstsw_ax();
- __ sahf();
- __ pop(eax);
+ __ FCmp();
__ j(zero, &false_result);
// Fall through to |true_result|.
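
FCmp() is a macro-assembler helper that condenses the five-instruction
sequence removed above into one call. A plausible expansion, assuming it uses
the fucomip instruction added earlier in this patch (the authoritative
definition lives in macro-assembler-ia32.cc):

    void MacroAssembler::FCmp() {
      fucomip();   // Compare st(0) with st(1), setting ZF/PF/CF directly.
      ffree(0);    // Free the remaining operand...
      fincstp();   // ...and pop it off the FPU stack.
    }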
@@ -6523,6 +6500,116 @@
}
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The register calling convention is left in edx and right in eax.
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ if (!(left.is(edx) && right.is(eax))) {
+ if (left.is(eax) && right.is(edx)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(edx)) {
+ __ mov(eax, right);
+ } else if (left.is(eax)) {
+ if (IsOperationCommutative()) {
+ __ mov(edx, right);
+ SetArgsReversed();
+ } else {
+ __ mov(edx, left);
+ __ mov(eax, right);
+ }
+ } else if (right.is(edx)) {
+ if (IsOperationCommutative()) {
+ __ mov(eax, left);
+ SetArgsReversed();
+ } else {
+ __ mov(eax, right);
+ __ mov(edx, left);
+ }
+ } else if (right.is(eax)) {
+ __ mov(edx, left);
+ } else {
+ __ mov(edx, left);
+ __ mov(eax, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(Immediate(right));
+ } else {
+ // Adapt the arguments to the calling convention: left in edx, right in eax.
+ if (left.is(edx)) {
+ __ mov(eax, Immediate(right));
+ } else if (left.is(eax) && IsOperationCommutative()) {
+ __ mov(edx, Immediate(right));
+ SetArgsReversed();
+ } else {
+ __ mov(edx, left);
+ __ mov(eax, Immediate(right));
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(Immediate(left));
+ __ push(right);
+ } else {
+ // Adapt the arguments to the calling convention: left in edx, right in eax.
+ bool is_commutative = (op_ == Token::ADD) || (op_ == Token::MUL);
+ if (right.is(eax)) {
+ __ mov(edx, Immediate(left));
+ } else if (right.is(edx) && is_commutative) {
+ __ mov(eax, Immediate(left));
+ } else {
+ __ mov(edx, Immediate(left));
+ __ mov(eax, right);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and
// leave result in register eax.
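
All three GenerateCall overloads above normalize to the same convention before
calling the stub: left operand in edx, right operand in eax. The case analysis
gets there without clobbering either value, for example:

    // left=eax, right=edx, commutative op  -> no moves; mark args reversed
    // left=eax, right=edx, otherwise       -> xchg(eax, edx)
    // left=ecx, right=ebx                  -> mov edx, ecx; mov eax, ebx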
@@ -6670,22 +6757,23 @@
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- if (flags_ == SMI_CODE_IN_STUB) {
- // The fast case smi code wasn't inlined in the stub caller
- // code. Generate it here to speed up common operations.
- Label slow;
- __ mov(ebx, Operand(esp, 1 * kPointerSize)); // get y
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // get x
- GenerateSmiCode(masm, &slow);
- __ ret(2 * kPointerSize); // remove both operands
+ __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+ // Generate fast case smi code if requested, i.e. when the caller has not
+ // already inlined it. Generating it here speeds up common operations.
+ if (HasSmiCodeInStub()) {
+ Label slow;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ GenerateSmiCode(masm, &slow);
+ GenerateReturn(masm);
// Too bad. The fast case smi code didn't succeed.
__ bind(&slow);
}
- // Setup registers.
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // get y
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // get x
+ // Make sure the arguments are in edx and eax.
+ GenerateLoadArguments(masm);
// Floating point case.
switch (op_) {
@@ -6719,19 +6807,20 @@
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &skip_allocation, not_taken);
// Fall through!
- case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm,
- &call_runtime,
- ecx,
- edx,
- eax);
+ case NO_OVERWRITE: {
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
+ // Now eax can be overwritten, losing one of the arguments; we are done
+ // with it and will not need it any more.
+ __ mov(eax, ebx);
__ bind(&skip_allocation);
break;
+ }
default: UNREACHABLE();
}
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
+ GenerateReturn(masm);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
// Allocate a heap number, if needed.
@@ -6747,11 +6836,12 @@
__ j(not_zero, &skip_allocation, not_taken);
// Fall through!
case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm,
- &call_runtime,
- ecx,
- edx,
- eax);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
+ // Now eax can be overwritten, losing one of the arguments; we are done
+ // with it and will not need it any more.
+ __ mov(eax, ebx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -6766,7 +6856,7 @@
default: UNREACHABLE();
}
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
}
case Token::MOD: {
@@ -6800,18 +6890,14 @@
// Check if right operand is int32.
__ fist_s(Operand(esp, 0 * kPointerSize));
__ fild_s(Operand(esp, 0 * kPointerSize));
- __ fucompp();
- __ fnstsw_ax();
- __ sahf();
+ __ FCmp();
__ j(not_zero, &operand_conversion_failure);
__ j(parity_even, &operand_conversion_failure);
// Check if left operand is int32.
__ fist_s(Operand(esp, 1 * kPointerSize));
__ fild_s(Operand(esp, 1 * kPointerSize));
- __ fucompp();
- __ fnstsw_ax();
- __ sahf();
+ __ FCmp();
__ j(not_zero, &operand_conversion_failure);
__ j(parity_even, &operand_conversion_failure);
}
@@ -6858,8 +6944,7 @@
__ j(not_zero, &skip_allocation, not_taken);
// Fall through!
case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
- ecx, edx, eax);
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -6899,8 +6984,20 @@
}
// If all else fails, use the runtime system to get the correct
- // result.
+ // result. If the arguments were passed in registers, put them back on the
+ // stack in the correct order first.
__ bind(&call_runtime);
+ if (HasArgumentsInRegisters()) {
+ __ pop(ecx);
+ if (HasArgumentsReversed()) {
+ __ push(eax);
+ __ push(edx);
+ } else {
+ __ push(edx);
+ __ push(eax);
+ }
+ __ push(ecx);
+ }
switch (op_) {
case Token::ADD: {
// Test for string arguments before calling runtime.
@@ -6977,22 +7074,23 @@
}
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch1,
- Register scratch2,
- Register result) {
- // Allocate heap number in new space.
- __ AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ // If the arguments are not passed in registers, read them from the stack.
+ if (!HasArgumentsInRegisters()) {
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ }
+}
- // Set the map.
- __ mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If the arguments are not passed in registers, remove them from the stack
+ // before returning.
+ if (!HasArgumentsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
}
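
GenerateReturn mirrors GenerateLoadArguments: with stack arguments the stub
entry looks like the sketch below and ret(2 * kPointerSize) pops both
operands; with register arguments nothing was pushed, so ret(0) suffices.

    // esp + 0  return address
    // esp + 4  right operand (y)  -> eax in GenerateLoadArguments
    // esp + 8  left operand (x)   -> edx in GenerateLoadArguments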
@@ -7152,7 +7250,7 @@
} else {
__ mov(edx, Operand(eax));
// edx: operand
- FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
// eax: allocated 'empty' number
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 142a5a1..a37bffe 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -294,6 +294,15 @@
Handle<Script> script,
bool is_eval);
+ // Printing of AST, etc. as requested by flags.
+ static void MakeCodePrologue(FunctionLiteral* fun);
+
+ // Allocate and install the code.
+ static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ Handle<Script> script);
+
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@@ -303,6 +312,8 @@
bool is_toplevel,
Handle<Script> script);
+ static void RecordPositions(MacroAssembler* masm, int pos);
+
// Accessors
MacroAssembler* masm() { return masm_; }
@@ -385,7 +396,7 @@
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- Operand ContextOperand(Register context, int index) const {
+ static Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
@@ -396,7 +407,7 @@
JumpTarget* slow);
// Expressions
- Operand GlobalObject() const {
+ static Operand GlobalObject() {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
@@ -500,10 +511,11 @@
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
+ static Handle<Code> ComputeLazyCompile(int argc);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+ static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
@@ -548,6 +560,14 @@
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Simple condition analysis.
+ enum ConditionAnalysis {
+ ALWAYS_TRUE,
+ ALWAYS_FALSE,
+ DONT_KNOW
+ };
+ ConditionAnalysis AnalyzeCondition(Expression* cond);
+
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
@@ -597,6 +617,8 @@
friend class JumpTarget;
friend class Reference;
friend class Result;
+ friend class FastCodeGenerator;
+ friend class CodeGenSelector;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -604,47 +626,74 @@
};
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
+// Flags that indicate how to generate code for the stub.
enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
class GenericBinaryOpStub: public CodeStub {
public:
- GenericBinaryOpStub(Token::Value op,
+ GenericBinaryOpStub(Token::Value operation,
OverwriteMode mode,
GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
+ : op_(operation),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ // Generate code to call the stub with the supplied arguments. This adds
+ // call-site code that prepares the arguments either in registers or on the
+ // stack, together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments are passed in registers, not on the stack.
+ bool args_reversed_; // The left and right arguments are swapped.
bool use_sse3_;
const char* GetName();
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ PrintF("GenericBinaryOpStub (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d)\n",
Token::String(op_),
static_cast<int>(mode_),
- static_cast<int>(flags_));
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_));
}
#endif
- // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+ // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 12> {};
- class SSE3Bits: public BitField<bool, 14, 1> {};
+ class OpBits: public BitField<Token::Value, 2, 10> {};
+ class SSE3Bits: public BitField<bool, 12, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+ class ArgsReversedBits: public BitField<bool, 14, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
Major MajorKey() { return GenericBinaryOp; }
@@ -653,9 +702,30 @@
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_);
+ | SSE3Bits::encode(use_sse3_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_);
}
+
void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+
+ bool ArgsInRegistersSupported() {
+ return ((op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV))
+ && flags_ != NO_SMI_CODE_IN_STUB;
+ }
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgumentsInRegisters() { return args_in_registers_; }
+ bool HasArgumentsReversed() { return args_reversed_; }
};
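
For reference, the 16-bit minor key layout FRASOOOOOOOOOOMM introduced above
decodes as follows, read straight off the BitField declarations:

    // bits 0-1   MM  OverwriteMode          (ModeBits)
    // bits 2-11  O   Token::Value, 10 bits  (OpBits)
    // bit  12    S   use_sse3_              (SSE3Bits)
    // bit  13    A   args_in_registers_     (ArgsInRegistersBits)
    // bit  14    R   args_reversed_         (ArgsReversedBits)
    // bit  15    F   GenericBinaryFlags     (FlagBits)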
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 7e0dfd1..2d20117 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -63,7 +63,7 @@
// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsCallInstruction();
+ return rinfo->IsPatchedReturnSequence();
}
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 458844e..adedf34 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -124,6 +124,14 @@
};
+static const char* conditional_move_mnem[] = {
+ /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
+ /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
+ /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
+ /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
+};
+
+
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
@@ -311,6 +319,7 @@
int JumpConditional(byte* data, const char* comment);
int JumpConditionalShort(byte* data, const char* comment);
int SetCC(byte* data);
+ int CMov(byte* data);
int FPUInstruction(byte* data);
void AppendToBuffer(const char* format, ...);
@@ -615,6 +624,16 @@
// Returns number of bytes used, including *data.
+int DisassemblerIA32::CMov(byte* data) {
+ assert(*data == 0x0F);
+ byte cond = *(data + 1) & 0x0F;
+ const char* mnem = conditional_move_mnem[cond];
+ int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
+ return 2 + op_size; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
byte b1 = *data;
byte b2 = *(data + 1);
@@ -861,6 +880,8 @@
data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
} else if ((f0byte & 0xF0) == 0x90) {
data += SetCC(data);
+ } else if ((f0byte & 0xF0) == 0x40) {
+ data += CMov(data);
} else {
data += 2;
if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
@@ -956,6 +977,19 @@
AppendToBuffer("mov_w ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0x0F) {
+ data++;
+ if (*data == 0x2F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("comisd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ UnimplementedInstruction();
+ }
} else {
UnimplementedInstruction();
}
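
Two decoding notes on the disassembler additions (hedged, per the Intel opcode
map):

    // CMOVcc is 0F 40+cc, so the low nibble of the second opcode byte
    // indexes conditional_move_mnem, e.g. 0F 44 /r -> cmovz reg, r/m.
    // COMISD is the operand-size-prefixed form 66 0F 2F /r, which is why
    // it is decoded under the 0x66 prefix branch.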
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
new file mode 100644
index 0000000..663d136
--- /dev/null
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -0,0 +1,545 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them. The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called (ie, ourselves)
+// o esi: our context
+// o ebp: our caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
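+//
+// A sketch of the resulting frame (hedged; JavaScriptFrameConstants in
+// frames-ia32.h is authoritative):
+// ebp + 8 and up: receiver and arguments (pushed by the caller)
+// ebp + 4 : return address
+// ebp + 0 : caller's frame pointer
+// ebp - 4 : context (esi)
+// ebp - 8 : JS function (edi)
+// ebp - 12 and down: stack locals, initialized to undefined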
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+ function_ = fun;
+ SetFunctionPosition(fun);
+
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = fun->scope()->num_stack_slots();
+ for (int i = 0; i < locals_count; i++) {
+ __ push(Immediate(Factory::undefined_value()));
+ }
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ Label ok;
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+ __ j(above_equal, &ok, taken);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ VisitStatements(fun->body());
+ }
+
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ // Emit a 'return undefined' in case control fell off the end of the
+ // body.
+ __ mov(eax, Factory::undefined_value());
+ SetReturnPosition(fun);
+
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ // Do not use the leave instruction here because it is too short to
+ // patch with the code required by the debugger.
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ push(esi); // The context is the first argument.
+ __ push(Immediate(pairs));
+ __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+ Comment cmnt(masm_, "[ Block");
+ SetStatementPosition(stmt);
+ VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ SetStatementPosition(stmt);
+ Visit(stmt->expression());
+}
+
+
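+// Note on expression results (a hedged summary of the pattern used
+// throughout this file): an expression with a temporary location leaves its
+// value pushed on the stack, a constant location means the value is the
+// literal's handle and is materialized on demand, and a nowhere location
+// means the value is discarded.
+
+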
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
+ Expression* expr = stmt->expression();
+ Visit(expr);
+
+ // Complete the statement based on the location of the subexpression.
+ Location source = expr->location();
+ ASSERT(!source.is_nowhere());
+ if (source.is_temporary()) {
+ __ pop(eax);
+ } else {
+ ASSERT(source.is_constant());
+ ASSERT(expr->AsLiteral() != NULL);
+ __ mov(eax, expr->AsLiteral()->handle());
+ }
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+
+ // Do not use the leave instruction here because it is too short to
+ // patch with the code required by the debugger.
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+}
+
+
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ if (HasStackOverflow()) return;
+
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Create a new closure.
+ __ push(esi);
+ __ push(Immediate(boilerplate));
+ __ CallRuntime(Runtime::kNewClosure, 2);
+
+ if (expr->location().is_temporary()) {
+ __ push(eax);
+ } else {
+ ASSERT(expr->location().is_nowhere());
+ }
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Expression* rewrite = expr->var()->rewrite();
+ if (rewrite == NULL) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in ecx and the global
+ // object on the stack.
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, expr->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+
+ // A test eax instruction following the call is used by the IC to
+ // indicate that the inobject property case was inlined. Ensure there
+ // is no test eax instruction here. Remember that the assembler may
+ // choose to do peephole optimization (eg, push/pop elimination).
+ if (expr->location().is_temporary()) {
+ // Replace the global object with the result.
+ __ mov(Operand(esp, 0), eax);
+ } else {
+ ASSERT(expr->location().is_nowhere());
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+
+ } else {
+ Comment cmnt(masm_, "Stack slot");
+ Slot* slot = rewrite->AsSlot();
+ ASSERT(slot != NULL);
+ if (expr->location().is_temporary()) {
+ __ push(Operand(ebp, SlotOffset(slot)));
+ } else {
+ ASSERT(expr->location().is_nowhere());
+ }
+ }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExp Literal");
+ Label done;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = regexp literal.
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &done);
+  // Create the regexp literal using the runtime function.
+ // Result will be in eax.
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->pattern()));
+ __ push(Immediate(expr->flags()));
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ bind(&done);
+ if (expr->location().is_temporary()) {
+ __ push(eax);
+ } else {
+ ASSERT(expr->location().is_nowhere());
+ }
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ Label make_clone;
+
+ // Fetch the function's literals array.
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ // Check if the literal's boilerplate has been instantiated.
+ int offset =
+ FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+ __ mov(eax, FieldOperand(ebx, offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &make_clone);
+
+ // Instantiate the boilerplate.
+ __ push(ebx);
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(expr->literals()));
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+ __ bind(&make_clone);
+ // Clone the boilerplate.
+ __ push(eax);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ } else {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(eax);
+ result_saved = true;
+ }
+ Visit(subexpr);
+ ASSERT(subexpr->location().is_temporary());
+
+ // Store the subexpression value in the array's elements.
+ __ pop(eax); // Subexpression value.
+ __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
+ __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ mov(FieldOperand(ebx, offset), eax);
+
+ // Update the write barrier for the array store.
+ __ RecordWrite(ebx, offset, eax, ecx);
+ }
+
+ Location destination = expr->location();
+ if (destination.is_nowhere() && result_saved) {
+ __ add(Operand(esp), Immediate(kPointerSize));
+ } else if (destination.is_temporary() && !result_saved) {
+ __ push(eax);
+ }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+ Expression* rhs = expr->value();
+ Visit(rhs);
+
+ // Left-hand side can only be a global or a (parameter or local) slot.
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->slot() != NULL);
+
+ // Complete the assignment based on the location of the right-hand-side
+ // value and the desired location of the assignment value.
+ Location destination = expr->location();
+ Location source = rhs->location();
+ ASSERT(!destination.is_constant());
+ ASSERT(!source.is_nowhere());
+
+ if (var->is_global()) {
+ // Assignment to a global variable, use inline caching. Right-hand-side
+ // value is passed in eax, variable name in ecx, and the global object
+ // on the stack.
+ if (source.is_temporary()) {
+ __ pop(eax);
+ } else {
+ ASSERT(source.is_constant());
+ ASSERT(rhs->AsLiteral() != NULL);
+ __ mov(eax, rhs->AsLiteral()->handle());
+ }
+ __ mov(ecx, var->name());
+ __ push(CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Overwrite the global object on the stack with the result if needed.
+ if (destination.is_temporary()) {
+ __ mov(Operand(esp, 0), eax);
+ } else {
+ ASSERT(destination.is_nowhere());
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+
+ } else {
+ // Local or parameter assignment.
+ if (source.is_temporary()) {
+ if (destination.is_temporary()) {
+ // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
+ // temporary on the stack.
+ __ mov(eax, Operand(esp, 0));
+ __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+ } else {
+ ASSERT(destination.is_nowhere());
+ // Case 'var = temp'. Discard right-hand-side temporary.
+ __ pop(Operand(ebp, SlotOffset(var->slot())));
+ }
+ } else {
+ ASSERT(source.is_constant());
+ ASSERT(rhs->AsLiteral() != NULL);
+ // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+ // discarded result. Always perform the assignment.
+ __ mov(eax, rhs->AsLiteral()->handle());
+ __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+ if (destination.is_temporary()) {
+ // Case 'temp <- (var = constant)'. Save result.
+ __ push(eax);
+ }
+ }
+ }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Expression* fun = expr->expression();
+ ZoneList<Expression*>* args = expr->arguments();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+ ASSERT(var != NULL && !var->is_this() && var->is_global());
+ ASSERT(!var->is_possibly_eval());
+
+ __ push(Immediate(var->name()));
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT(!args->at(i)->location().is_nowhere());
+ if (args->at(i)->location().is_constant()) {
+ ASSERT(args->at(i)->AsLiteral() != NULL);
+ __ push(Immediate(args->at(i)->AsLiteral()->handle()));
+ }
+ }
+  // Record the source position for the debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ if (expr->location().is_temporary()) {
+ __ mov(Operand(esp, 0), eax);
+ } else {
+ ASSERT(expr->location().is_nowhere());
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+ Runtime::Function* function = expr->function();
+
+ ASSERT(function != NULL);
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT(!args->at(i)->location().is_nowhere());
+ if (args->at(i)->location().is_constant()) {
+ ASSERT(args->at(i)->AsLiteral() != NULL);
+ __ push(Immediate(args->at(i)->AsLiteral()->handle()));
+ } else {
+ ASSERT(args->at(i)->location().is_temporary());
+      // If the location is temporary, the value is already on the
+      // stack, so there is nothing to do here.
+ }
+ }
+
+ __ CallRuntime(function, arg_count);
+ if (expr->location().is_temporary()) {
+ __ push(eax);
+ } else {
+ ASSERT(expr->location().is_nowhere());
+ }
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ // Compile a short-circuited boolean or operation in a non-test
+ // context.
+ ASSERT(expr->op() == Token::OR);
+ // Compile (e0 || e1) as if it were
+ // (let (temp = e0) temp ? temp : e1).
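+  // For example, in 'true || f()' the right operand is never
+  // evaluated and the value of the expression is the left operand.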
+
+ Label eval_right, done;
+ Location destination = expr->location();
+ ASSERT(!destination.is_constant());
+
+ Expression* left = expr->left();
+ Location left_source = left->location();
+ ASSERT(!left_source.is_nowhere());
+
+ Expression* right = expr->right();
+ Location right_source = right->location();
+ ASSERT(!right_source.is_nowhere());
+
+ Visit(left);
+ // Use the shared ToBoolean stub to find the boolean value of the
+ // left-hand subexpression. Load the value into eax to perform some
+ // inlined checks assumed by the stub.
+ if (left_source.is_temporary()) {
+ if (destination.is_temporary()) {
+ // Copy the left-hand value into eax because we may need it as the
+ // final result.
+ __ mov(eax, Operand(esp, 0));
+ } else {
+ // Pop the left-hand value into eax because we will not need it as the
+ // final result.
+ __ pop(eax);
+ }
+ } else {
+ // Load the left-hand value into eax. Put it on the stack if we may
+ // need it.
+ ASSERT(left->AsLiteral() != NULL);
+ __ mov(eax, left->AsLiteral()->handle());
+ if (destination.is_temporary()) __ push(eax);
+ }
+ // The left-hand value is in eax. It is also on the stack iff the
+ // destination location is temporary.
+
+ // Perform fast checks assumed by the stub.
+ __ cmp(eax, Factory::undefined_value()); // The undefined value is false.
+ __ j(equal, &eval_right);
+ __ cmp(eax, Factory::true_value()); // True is true.
+ __ j(equal, &done);
+ __ cmp(eax, Factory::false_value()); // False is false.
+ __ j(equal, &eval_right);
+ ASSERT(kSmiTag == 0);
+ __ test(eax, Operand(eax)); // The smi zero is false.
+ __ j(zero, &eval_right);
+ __ test(eax, Immediate(kSmiTagMask)); // All other smis are true.
+ __ j(zero, &done);
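+  // (These checks rely on the ia32 smi encoding: with kSmiTag == 0 a
+  // smi is any word whose low bit is clear, and the smi zero is the
+  // machine word 0.)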
+
+ // Call the stub for all other cases.
+ __ push(eax);
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax)); // The stub returns nonzero for true.
+ __ j(not_zero, &done);
+
+ __ bind(&eval_right);
+ // Discard the left-hand value if present on the stack.
+ if (destination.is_temporary()) {
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+ Visit(right);
+
+ // Save or discard the right-hand value as needed.
+ if (destination.is_temporary() && right_source.is_constant()) {
+ ASSERT(right->AsLiteral() != NULL);
+ __ push(Immediate(right->AsLiteral()->handle()));
+ } else if (destination.is_nowhere() && right_source.is_temporary()) {
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+
+ __ bind(&done);
+}
+
+
+} } // namespace v8::internal
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index f7369a8..3aa3c34 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -298,11 +298,10 @@
__ shl(eax, kSmiTagSize);
__ ret(0);
-
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
- KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
__ bind(&check_string);
// The key is not a smi.
@@ -343,6 +342,166 @@
}
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label slow, failed_allocation;
+
+ // Load name and receiver.
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the map of the receiver.
+ __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the instance type from the map of the receiver.
+ __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // Check that the object is a JS object.
+ __ cmp(edx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow, not_taken);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // eax: index (as a smi)
+ // ecx: JSObject
+ __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(map));
+ __ j(not_equal, &slow, not_taken);
+
+ // Check that the index is in range.
+ __ sar(eax, kSmiTagSize); // Untag the index.
+ __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // eax: untagged index
+ // ecx: elements array
+ __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ // ecx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ mov_b(eax, Operand(ecx, eax, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ mov_w(eax, Operand(ecx, eax, times_2, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(eax, Operand(ecx, eax, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ fld_s(Operand(ecx, eax, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // eax: value
+ // For floating-point array type:
+ // FP(0): value
+
+ if (array_type == kExternalIntArray ||
+ array_type == kExternalUnsignedIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ if (array_type == kExternalIntArray) {
+ // See Smi::IsValid for why this works.
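+      // With a one-bit tag a smi holds 31 signed bits, i.e. the range
+      // [-2^30, 2^30 - 1]. Adding 0x40000000 maps that range onto
+      // [0, 0x7FFFFFFF], so a single unsigned compare against
+      // 0x80000000 rejects every out-of-range value; in C, roughly:
+      //   bool fits_smi = (uint32_t)(value + 0x40000000) < 0x80000000u;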
+ __ mov(ebx, eax);
+ __ add(Operand(ebx), Immediate(0x40000000));
+ __ cmp(ebx, 0x80000000);
+ __ j(above_equal, &box_int);
+ } else {
+ ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ // The test is different for unsigned int values. Since we need
+ // the Smi-encoded result to be treated as unsigned, we can't
+ // handle either of the top two bits being set in the value.
+ __ test(eax, Immediate(0xC0000000));
+ __ j(not_zero, &box_int);
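+      // (A non-negative smi payload has only 30 value bits, so any
+      // unsigned value of 2^30 or more must be boxed.)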
+ }
+
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ if (array_type == kExternalIntArray) {
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ } else {
+ ASSERT(array_type == kExternalUnsignedIntArray);
+ // Need to zero-extend the value.
+ // There's no fild variant for unsigned values, so zero-extend
+ // to a 64-bit int manually.
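+      // Pushing the zero first makes it the high dword of the
+      // little-endian 64-bit integer that fild_d reads at esp.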
+ __ push(Immediate(0));
+ __ push(eax);
+ __ fild_d(Operand(esp, 0));
+ __ pop(eax);
+ __ pop(eax);
+ }
+ // FP(0): value
+ __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+ // Set the value.
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+ // Set the value.
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else {
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+ }
+
+  // If allocation of the HeapNumber fails, there is still a value on
+  // top of the FPU stack. Remove it.
+ __ bind(&failed_allocation);
+ __ ffree();
+ __ fincstp();
+ // Fall through to slow case.
+
+ // Slow case: Load name and receiver from stack and jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -396,15 +555,9 @@
// ebx: index (as a smi)
__ j(below, &fast, taken);
- // Slow case: Push extra copies of the arguments (3).
+ // Slow case: call runtime.
__ bind(&slow);
- __ pop(ecx);
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(Operand(esp, 1 * kPointerSize));
- __ push(eax);
- __ push(ecx);
- // Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+ Generate(masm, ExternalReference(Runtime::kSetProperty));
// Check whether the elements is a pixel array.
// eax: value
@@ -424,14 +577,11 @@
__ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
{ // Clamp the value to [0..255].
- Label done, is_negative;
+ Label done;
__ test(eax, Immediate(0xFFFFFF00));
__ j(zero, &done);
- __ j(negative, &is_negative);
- __ mov(eax, Immediate(255));
- __ jmp(&done);
- __ bind(&is_negative);
- __ xor_(eax, Operand(eax)); // Clear eax.
+ __ setcc(negative, eax); // 1 if negative, 0 if positive.
+ __ dec_b(eax); // 0 if negative, 255 if positive.
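+    // In effect this computes, branch-free, al = (value < 0) ? 0 : 255,
+    // i.e. (uint8_t)((value < 0) - 1).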
__ bind(&done);
}
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
@@ -458,7 +608,6 @@
__ sub(Operand(ebx), Immediate(1 << kSmiTagSize)); // decrement ebx again
__ jmp(&fast);
-
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is the
// length is always a smi.
@@ -490,6 +639,201 @@
}
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+ ExternalArrayType array_type) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label slow, check_heap_number;
+
+ // Get the receiver from the stack.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ // Get the map from the receiver.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Get the key from the stack.
+ __ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // Check that the key is a smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ // Get the instance type from the map of the receiver.
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ // Check that the object is a JS object.
+ __ cmp(ecx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // eax: value
+ // edx: JSObject
+ // ebx: index (as a smi)
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(map));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ sar(ebx, kSmiTagSize); // Untag the index.
+ __ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // eax: value
+ // ecx: elements array
+ // ebx: untagged index
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_equal, &check_heap_number);
+ // smi case
+ __ mov(edx, eax); // Save the value.
+ __ sar(eax, kSmiTagSize); // Untag the value.
+ __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ // ecx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(Operand(ecx, ebx, times_4, 0), eax);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ __ fstp_s(Operand(ecx, ebx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+
+ __ bind(&check_heap_number);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &slow);
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ mov(edx, eax); // Save the value.
+ __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+ // ebx: untagged index
+ // ecx: base pointer of external storage
+ // top of FPU stack: value
+ if (array_type == kExternalFloatArray) {
+ __ fstp_s(Operand(ecx, ebx, times_4, 0));
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+ } else {
+ // Need to perform float-to-int conversion.
+ // Test the top of the FP stack for NaN.
+ Label is_nan;
+ __ fucomi(0);
+ __ j(parity_even, &is_nan);
+
+ if (array_type != kExternalUnsignedIntArray) {
+ __ push(eax); // Make room on stack
+ __ fistp_s(Operand(esp, 0));
+ __ pop(eax);
+ } else {
+ // fistp stores values as signed integers.
+ // To represent the entire range, we need to store as a 64-bit
+ // int and discard the high 32 bits.
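+        // fistp_d leaves the 64-bit result at esp, low dword first;
+        // the pops below keep only that low dword.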
+ __ push(eax); // Make room on stack
+ __ push(eax); // Make room on stack
+ __ fistp_d(Operand(esp, 0));
+ __ pop(eax);
+ __ mov(Operand(esp, 0), eax);
+ __ pop(eax);
+ }
+ // eax: untagged integer value
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray: {
+          // We also need to check explicitly for +/-Infinity. These are
+          // converted to MIN_INT, and we must be careful not to confuse
+          // them with legal uses of MIN_INT.
+ Label not_infinity;
+          // This exponent test matches both NaN and Infinity, but NaN
+          // has already been handled by the FPU check above.
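+          // (An IEEE 754 double stores its 11 exponent bits in the top
+          // half-word; 0x7FF0 masks exactly those bits, and an all-ones
+          // exponent denotes Infinity or NaN.)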
+ __ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
+ __ and_(edi, 0x7FF0);
+ __ cmp(edi, 0x7FF0);
+          __ j(not_equal, &not_infinity);
+ __ mov(eax, 0);
+          __ bind(&not_infinity);
+ __ mov(Operand(ecx, ebx, times_4, 0), eax);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+
+ __ bind(&is_nan);
+ __ ffree();
+ __ fincstp();
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(ecx, ebx, times_1, 0), 0);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov(eax, 0);
+ __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ mov(eax, edx); // Return the original value.
+ __ ret(0);
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ Generate(masm, ExternalReference(Runtime::kSetProperty));
+}
+
+
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index e83bb92..08c4c0c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -319,11 +319,17 @@
void MacroAssembler::FCmp() {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
+ if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
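+    // fucomip (P6 and later, implied here by CMOV support) compares
+    // ST(0) with ST(1) and sets EFLAGS directly, avoiding the
+    // fnstsw/sahf sequence; ffree/fincstp then pop the remaining
+    // operand.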
+ fucomip();
+ ffree(0);
+ fincstp();
+ } else {
+ fucompp();
+ push(eax);
+ fnstsw_ax();
+ sahf();
+ pop(eax);
+ }
}
@@ -767,6 +773,24 @@
}
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate heap number in new space.
+ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
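+  // TAG_OBJECT makes AllocateInNewSpace return an already-tagged
+  // pointer, so FieldOperand can be used on the result directly.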
+
+ // Set the map.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+}
+
+
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
@@ -1049,7 +1073,6 @@
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
unresolved_.Add(entry);
@@ -1068,7 +1091,6 @@
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(true);
Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
unresolved_.Add(entry);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ed72c96..a0a2428 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -206,6 +206,15 @@
// un-done.
void UndoAllocationInNewSpace(Register object);
+  // Allocate a heap number in new space, leaving the value field
+  // uninitialized. The register scratch2 can be passed as no_reg; the
+  // others must be valid registers. Returns a tagged pointer in the
+  // result register, or jumps to gc_required if new space is full.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 1b8232f..980cec8 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -161,15 +161,16 @@
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
- // If positive we have to adjust the stack pointer.
- int delta = end - stack_pointer_;
- if (delta > 0) {
- stack_pointer_ = end;
- __ sub(Operand(esp), Immediate(delta * kPointerSize));
- }
-
+  // Emit normal 'push' instructions for elements above the stack
+  // pointer and use mov instructions for elements at or below it.
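+  // Pushing moves the stack pointer as a side effect, so elements
+  // above it get stack slots without a separate sub(esp, ...).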
for (int i = start; i <= end; i++) {
- if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+ if (!elements_[i].is_synced()) {
+ if (i <= stack_pointer_) {
+ SyncElementBelowStackPointer(i);
+ } else {
+ SyncElementByPushing(i);
+ }
+ }
}
}
@@ -454,14 +455,16 @@
Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
- // Verify that edi contains a JS function. The following code
- // relies on eax being available for use.
- __ test(edi, Immediate(kSmiTagMask));
- __ Check(not_zero,
- "VirtualFrame::Enter - edi is not a function (smi check).");
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ Check(equal,
- "VirtualFrame::Enter - edi is not a function (map check).");
+ if (FLAG_debug_code) {
+ // Verify that edi contains a JS function. The following code
+ // relies on eax being available for use.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ Check(not_zero,
+ "VirtualFrame::Enter - edi is not a function (smi check).");
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+ __ Check(equal,
+ "VirtualFrame::Enter - edi is not a function (map check).");
+ }
#endif
EmitPush(ebp);