Merge from v8 at revision 3723
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 2cf469a..dc017ae 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -860,6 +860,24 @@
}
+void Assembler::cmpb(const Operand& dst, Register src) {
+ ASSERT(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x38);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(Register dst, const Operand& src) {
+ ASSERT(dst.is_byte_register());
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x3A);
+ emit_operand(dst, src);
+}
+
+
void Assembler::cmpw(const Operand& op, Immediate imm16) {
ASSERT(imm16.is_int16());
EnsureSpace ensure_space(this);
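
For context: 0x38 encodes CMP r/m8, r8 and 0x3A encodes CMP r8, r/m8, and the
asserts hold because on ia32 only eax, ebx, ecx and edx have 8-bit forms
(al, bl, cl, dl). A minimal usage sketch with a hypothetical call site, not
code from this patch:

  // 'masm' is any MacroAssembler; the operands are illustrative.
  masm->cmpb(FieldOperand(edx, Map::kBitFieldOffset), eax);  // 0x38: CMP r/m8, r8
  masm->cmpb(eax, FieldOperand(edx, Map::kBitFieldOffset));  // 0x3A: CMP r8, r/m8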
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index d675ecf..da27fd0 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -559,6 +559,8 @@
void and_(const Operand& dst, const Immediate& x);
void cmpb(const Operand& op, int8_t imm8);
+ void cmpb(Register dst, const Operand& src);
+ void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 240f4da..121e155 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -639,15 +639,22 @@
return frame_->Pop();
}
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
@@ -697,6 +704,7 @@
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
+ ref->set_unloaded();
}
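
Taken together, the persist_after_get changes turn Reference into a small
state machine: GetValue unloads the reference itself unless it was created
for a compound assignment, SetValue always unloads, and the destructor merely
asserts that one of the two happened. A sketch of the intended call patterns,
assuming the header defaults persist_after_get to false:

  // Plain read: GetValue leaves the value on the frame and unloads.
  { Reference source(this, expr);        // persist_after_get == false
    source.GetValue();                   // value pushed, reference unloaded
  }                                      // ~Reference: is_unloaded() holds

  // Compound assignment: keep the reference alive across the read.
  { Reference target(this, expr, true);  // persist_after_get == true
    target.GetValue();                   // value pushed, reference kept
    // ...combine with the right-hand side...
    target.SetValue(NOT_CONST_INIT);     // SetValue unloads the reference
  }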
@@ -743,6 +751,12 @@
class FloatingPointHelper : public AllStatic {
public:
+
+ enum ArgLocation {
+ ARGS_ON_STACK,
+ ARGS_IN_REGISTERS
+ };
+
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
// operand in register number. Returns operand as floating point number
@@ -750,9 +764,16 @@
static void LoadFloatOperand(MacroAssembler* masm, Register number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
- // floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+ // Similar to LoadFloatOperands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -768,7 +789,11 @@
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
- static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
+ static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
};
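
A sketch of the two ArgLocation modes from the caller's side (the
GenericBinaryOpStub changes later in this patch are the real call sites):

  // Pre-existing behaviour: operands on the stack.
  FloatingPointHelper::LoadFloatOperands(masm, ecx);  // ARGS_ON_STACK default

  // New behaviour: operands already in edx/eax, e.g. when the stub was
  // entered with its arguments in registers.
  FloatingPointHelper::LoadFloatOperands(
      masm, ecx, FloatingPointHelper::ARGS_IN_REGISTERS);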
@@ -913,31 +938,6 @@
return;
}
- // Set the flags based on the operation, type and loop nesting level.
- GenericBinaryFlags flags;
- switch (op) {
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Bit operations always assume they likely operate on Smis. Still only
- // generate the inline Smi check code if this operation is part of a loop.
- flags = (loop_nesting() > 0)
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
-
- default:
- // By default only inline the Smi check code for likely smis if this
- // operation is part of a loop.
- flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? NO_SMI_CODE_IN_STUB
- : NO_GENERIC_BINARY_FLAGS;
- break;
- }
-
Result right = frame_->Pop();
Result left = frame_->Pop();
@@ -971,7 +971,6 @@
bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
- bool generate_no_smi_code = false; // No smi code at all, inline or in stub.
if (left_is_smi && right_is_smi) {
// Compute the constant result at compile time, and leave it on the frame.
@@ -980,33 +979,31 @@
if (FoldConstantSmis(op, left_int, right_int)) return;
}
+ Result answer;
if (left_is_non_smi || right_is_non_smi) {
- // Set flag so that we go straight to the slow case, with no smi code.
- generate_no_smi_code = true;
+ // Go straight to the slow case, with no smi code.
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi) {
- ConstantSmiBinaryOperation(op, &left, right.handle(),
- type, false, overwrite_mode);
- return;
+ answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+ type, false, overwrite_mode);
} else if (left_is_smi) {
- ConstantSmiBinaryOperation(op, &right, left.handle(),
- type, true, overwrite_mode);
- return;
- }
-
- if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
- LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+ type, true, overwrite_mode);
} else {
- frame_->Push(&left);
- frame_->Push(&right);
- // If we know the arguments aren't smis, use the binary operation stub
- // that does not check for the fast smi case.
- if (generate_no_smi_code) {
- flags = NO_SMI_CODE_IN_STUB;
+ // Set the flags based on the operation, type and loop nesting level.
+ // Bit operations always assume they likely operate on Smis. Still only
+ // generate the inline Smi check code if this operation is part of a loop.
+ // For all other operations only inline the Smi check code for likely smis
+ // if the operation is part of a loop.
+ if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+ answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+ } else {
+ GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+ answer = stub.GenerateCall(masm_, frame_, &left, &right);
}
- GenericBinaryOpStub stub(op, overwrite_mode, flags);
- Result answer = frame_->CallStub(&stub, 2);
- frame_->Push(&answer);
}
+ frame_->Push(&answer);
}
@@ -1093,10 +1090,11 @@
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode) {
+ Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
// We need eax as the quotient register, edx as the remainder
@@ -1218,7 +1216,7 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push("ient);
+ answer = quotient;
} else {
ASSERT(op == Token::MOD);
// Check for a negative zero result. If the result is zero, and
@@ -1234,9 +1232,10 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&remainder);
+ answer = remainder;
}
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Special handling of shift operations because they use fixed
@@ -1257,7 +1256,7 @@
frame_->Spill(ecx);
// Use a fresh answer register to avoid spilling the left operand.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
@@ -1321,8 +1320,8 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
- return;
+ ASSERT(answer.is_valid());
+ return answer;
}
// Handle the other binary operations.
@@ -1331,7 +1330,7 @@
// A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't
// need to be spilled in the fast case.
- Result answer = allocator_->Allocate();
+ answer = allocator_->Allocate();
ASSERT(answer.is_valid());
// Perform the smi tag check.
@@ -1353,12 +1352,12 @@
__ mov(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
+ __ add(answer.reg(), Operand(right->reg()));
deferred->Branch(overflow);
break;
case Token::SUB:
- __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
+ __ sub(answer.reg(), Operand(right->reg()));
deferred->Branch(overflow);
break;
@@ -1406,7 +1405,8 @@
deferred->BindExit();
left->Unuse();
right->Unuse();
- frame_->Push(&answer);
+ ASSERT(answer.is_valid());
+ return answer;
}
@@ -1575,36 +1575,34 @@
}
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> value,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> value,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
// Consumes the argument "operand".
-
// TODO(199): Optimize some special cases of operations involving a
// smi literal (multiply by 2, shift by 0, etc.).
if (IsUnsafeSmi(value)) {
Result unsafe_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &unsafe_operand, operand,
- overwrite_mode);
+ return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &unsafe_operand,
- overwrite_mode);
+ return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+ overwrite_mode);
}
- ASSERT(!operand->is_valid());
- return;
}
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
int int_value = smi_value->value();
+ Result answer;
switch (op) {
case Token::ADD: {
operand->ToRegister();
@@ -1627,13 +1625,12 @@
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
- Result answer; // Only allocate a new register if reversed.
if (reversed) {
// The reversed case is only hit when the right operand is not a
// constant.
@@ -1661,15 +1658,14 @@
deferred->Branch(not_zero);
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
break;
}
case Token::SAR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1689,21 +1685,21 @@
__ and_(operand->reg(), ~kSmiTagMask);
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
}
break;
case Token::SHR:
if (reversed) {
Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f;
operand->ToRegister();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1724,7 +1720,6 @@
operand->Unuse();
__ SmiTag(answer.reg());
deferred->BindExit();
- frame_->Push(&answer);
}
break;
@@ -1749,7 +1744,7 @@
}
operand->Unuse();
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
DeferredInlineSmiOperationReversed* deferred =
new DeferredInlineSmiOperationReversed(op,
answer.reg(),
@@ -1765,7 +1760,6 @@
__ SmiTag(answer.reg());
deferred->BindExit();
- frame_->Push(&answer);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1783,10 +1777,10 @@
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
} else {
// Use a fresh temporary for nonzero shift values.
- Result answer = allocator()->Allocate();
+ answer = allocator()->Allocate();
ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
@@ -1808,7 +1802,6 @@
deferred->Branch(overflow);
deferred->BindExit();
operand->Unuse();
- frame_->Push(&answer);
}
}
break;
@@ -1847,7 +1840,7 @@
}
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
@@ -1873,7 +1866,7 @@
__ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
}
deferred->BindExit();
- frame_->Push(operand);
+ answer = *operand;
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
@@ -1881,16 +1874,17 @@
default: {
Result constant_operand(value);
if (reversed) {
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+ overwrite_mode);
} else {
- LikelySmiBinaryOperation(op, operand, &constant_operand,
- overwrite_mode);
+ answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+ overwrite_mode);
}
break;
}
}
- ASSERT(!operand->is_valid());
+ ASSERT(answer.is_valid());
+ return answer;
}
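
The BIT_AND and power-of-two MOD cases above mask the tagged value directly;
the arithmetic that justifies this, assuming kSmiTag == 0, kSmiTagSize == 1,
and a non-negative operand (negative inputs take the deferred path), is
sketched below:

  // With tag(v) = v << 1 and m a power of two:
  //   tag(v) & (tag(m) - 1) == (v << 1) & (2*m - 1) == (v % m) << 1 == tag(v % m)
  // which is what "__ and_(operand->reg(), (int_value << kSmiTagSize) - 1)"
  // computes without ever untagging.
  inline int32_t TagSmi(int32_t v) { return v << 1; }
  inline int32_t TaggedModPow2(int32_t tagged, int32_t m) {
    return tagged & (TagSmi(m) - 1);  // requires tagged >= 0, m a power of two
  }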
@@ -2311,20 +2305,29 @@
}
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments).
+ // If the arguments object of the scope has not been allocated,
+ // and x.apply is Function.prototype.apply, this optimization
+ // just copies y and the arguments of the current function on the
+ // stack, as receiver and arguments, and calls x.
+ // In the implementation comments, we call x the applicand
+ // and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
- JumpTarget slow, done;
-
- // Load the apply function onto the stack. This will usually
+ // Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- Reference ref(this, apply);
- ref.GetValue();
- ASSERT(ref.type() == Reference::NAMED);
+ Load(applicand);
+ Handle<String> name = Factory::LookupAsciiSymbol("apply");
+ frame()->Push(name);
+ Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+ __ nop();
+ frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -2334,6 +2337,11 @@
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
+ // Contents of frame at this point:
+ // Frame[0]: arguments object of the current function or the hole.
+ // Frame[1]: receiver
+ // Frame[2]: applicand.apply
+ // Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -2341,143 +2349,151 @@
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
- bool try_lazy = true;
- if (probe.is_constant()) {
- try_lazy = probe.handle()->IsTheHole();
- } else {
- __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
- probe.Unuse();
- slow.Branch(not_equal);
- }
+ { VirtualFrame::SpilledScope spilled_scope;
+ Label slow, done;
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ __ j(not_equal, &slow);
+ }
- if (try_lazy) {
- JumpTarget build_args;
+ if (try_lazy) {
+ Label build_args;
+ // Get rid of the arguments object probe.
+ frame_->Drop(); // Can be called on a spilled frame.
+ // Stack now has 3 elements on it.
+ // Contents of stack at this point:
+ // esp[0]: receiver
+ // esp[1]: applicand.apply
+ // esp[2]: applicand.
- // Get rid of the arguments object probe.
- frame_->Drop();
-
- // Before messing with the execution stack, we sync all
- // elements. This is bound to happen anyway because we're
- // about to call a function.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Check that the receiver really is a JavaScript object.
- { frame_->PushElementAt(0);
- Result receiver = frame_->Pop();
- receiver.ToRegister();
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
- Result tmp = allocator_->Allocate();
+ // Check that the receiver really is a JavaScript object.
+ __ mov(eax, Operand(esp, 0));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
- build_args.Branch(less);
- }
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ j(below, &build_args);
- // Verify that we're invoking Function.prototype.apply.
- { frame_->PushElementAt(1);
- Result apply = frame_->Pop();
- apply.ToRegister();
- __ test(apply.reg(), Immediate(kSmiTagMask));
- build_args.Branch(zero);
- Result tmp = allocator_->Allocate();
- __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
- build_args.Branch(not_equal);
- __ mov(tmp.reg(),
- FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ // Check that applicand.apply is Function.prototype.apply.
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &build_args);
+ __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
Immediate(apply_code));
- build_args.Branch(not_equal);
+ __ j(not_equal, &build_args);
+
+ // Check that applicand is a function.
+ __ mov(edi, Operand(esp, 2 * kPointerSize));
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &build_args);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(not_equal, &build_args);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ mov(eax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(eax);
+ __ mov(ecx, Operand(eax));
+ __ cmp(eax, kArgumentsLimit);
+ __ j(above, &build_args);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ // ecx is a small non-negative integer, due to the test above.
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ // Drop applicand.apply and applicand from the stack, and push
+ // the result of the function call, but leave the spilled frame
+ // unchanged, with 3 elements, so it is correct when we compile the
+ // slow-case code.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(eax);
+ // Stack now has 1 element:
+ // esp[0]: result
+ __ jmp(&done);
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // applicand.apply.
+ __ bind(&build_args);
+ // Stack now has 3 elements, because we have jumped here from a point where:
+ // esp[0]: receiver
+ // esp[1]: applicand.apply
+ // esp[2]: applicand.
+
+ // StoreArgumentsObject requires a correct frame, and may modify it.
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->SpillAll();
+ arguments_object.ToRegister();
+ frame_->EmitPush(arguments_object.reg());
+ arguments_object.Unuse();
+ // Stack and frame now have 4 elements.
+ __ bind(&slow);
}
- // Get the function receiver from the stack. Check that it
- // really is a function.
- __ mov(edi, Operand(esp, 2 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- build_args.Branch(zero);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- build_args.Branch(not_equal);
+ // Generic computation of x.apply(y, args) with no special optimization.
+ // Flip applicand.apply and applicand on the stack, so
+ // applicand looks like the receiver of the applicand.apply call.
+ // Then process it as a normal function call.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ __ mov(Operand(esp, 3 * kPointerSize), ebx);
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- Label invoke, adapted;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted);
-
- // No arguments adaptor frame. Copy fixed number of arguments.
- __ mov(eax, Immediate(scope_->num_parameters()));
- for (int i = 0; i < scope_->num_parameters(); i++) {
- __ push(frame_->ParameterAt(i));
- }
- __ jmp(&invoke);
-
- // Arguments adaptor frame present. Copy arguments from there, but
- // avoid copying too many arguments to avoid stack overflows.
- __ bind(&adapted);
- static const uint32_t kArgumentsLimit = 1 * KB;
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(eax);
- __ mov(ecx, Operand(eax));
- __ cmp(eax, kArgumentsLimit);
- build_args.Branch(above);
-
- // Loop through the arguments pushing them onto the execution
- // stack. We don't inform the virtual frame of the push, so we don't
- // have to worry about getting rid of the elements from the virtual
- // frame.
- Label loop;
- __ bind(&loop);
- __ test(ecx, Operand(ecx));
- __ j(zero, &invoke);
- __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
- __ dec(ecx);
- __ jmp(&loop);
-
- // Invoke the function. The virtual frame knows about the receiver
- // so make sure to forget that explicitly.
- __ bind(&invoke);
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
- frame_->Forget(1);
- Result result = allocator()->Allocate(eax);
- frame_->SetElementAt(0, &result);
- done.Jump();
-
- // Slow-case: Allocate the arguments object since we know it isn't
- // there, and fall-through to the slow-case where we call
- // Function.prototype.apply.
- build_args.Bind();
- Result arguments_object = StoreArgumentsObject(false);
- frame_->Push(&arguments_object);
- slow.Bind();
- }
-
- // Flip the apply function and the function to call on the stack, so
- // the function looks like the receiver of the apply call. This way,
- // the generic Function.prototype.apply implementation can deal with
- // the call like it usually does.
- Result a2 = frame_->Pop();
- Result a1 = frame_->Pop();
- Result ap = frame_->Pop();
- Result fn = frame_->Pop();
- frame_->Push(&ap);
- frame_->Push(&fn);
- frame_->Push(&a1);
- frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
- Result res = frame_->CallStub(&call_function, 3);
- frame_->Push(&res);
-
- // All done. Restore context register after call.
- if (try_lazy) done.Bind();
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+ Result res = frame_->CallStub(&call_function, 3);
+ // The function and its two arguments have been dropped.
+ frame_->Drop(1); // Drop the receiver as well.
+ res.ToRegister();
+ frame_->EmitPush(res.reg());
+ // Stack now has 1 element:
+ // esp[0]: result
+ if (try_lazy) __ bind(&done);
+ } // End of spilled scope.
+ // Restore the context register after a call.
frame_->RestoreContextRegister();
}
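
Restated at a higher level, the fast path above rewrites x.apply(y, arguments)
as a direct call of x with y as receiver, guarded by the checks the labels
document. The guard sequence in ordinary C++, as a sketch (IsJSObject and
IsApplyBuiltin are illustrative stand-ins, not V8 functions):

  // Returns true if the lazy fast path may be taken; otherwise the code
  // falls back to building an arguments object and calling apply.
  static bool TryLazyApply(Object* receiver, Object* apply_fn,
                           Object* applicand, uint32_t arg_count) {
    if (!IsJSObject(receiver)) return false;       // must be a JS object
    if (!IsApplyBuiltin(apply_fn)) return false;   // x.apply must be
                                                   // Function.prototype.apply
    if (!applicand->IsJSFunction()) return false;  // x must be a function
    if (arg_count > 1 * KB) return false;          // kArgumentsLimit
    return true;                                   // push y + params, call x
  }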
@@ -3517,17 +3533,13 @@
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2);
+ } else {
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (e.g., ebx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
frame_->Drop();
}
}
@@ -3535,10 +3547,6 @@
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -4588,9 +4596,12 @@
void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -4612,12 +4623,27 @@
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
+ if (node->ends_initialization_block()) {
+ // Add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ ASSERT(target.type() == Reference::NAMED ||
+ target.type() == Reference::KEYED);
+ if (target.type() == Reference::NAMED) {
+ frame_->Dup();
+ // Dup target receiver on stack.
+ } else {
+ ASSERT(target.type() == Reference::KEYED);
+ Result temp = frame_->Pop();
+ frame_->Dup();
+ frame_->Push(&temp);
+ }
+ }
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
- } else {
+ } else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -4643,6 +4669,7 @@
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -4654,17 +4681,20 @@
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
+ ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
- // argument to the runtime call is the receiver, which is the
- // first value pushed as part of the reference, which is below
- // the lhs value.
- frame_->PushElementAt(target.size());
+ // argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment.
+ // Swap the receiver and the value of the assignment expression.
+ Result lhs = frame_->Pop();
+ Result receiver = frame_->Pop();
+ frame_->Push(&lhs);
+ frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
+ ASSERT(frame_->height() == original_height + 1);
}
@@ -4827,7 +4857,7 @@
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
- CallApplyLazy(property,
+ CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -4860,16 +4890,21 @@
// -------------------------------------------
// Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
+ Reference ref(this, property);
+ ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
- // The reference's size is non-negative.
- frame_->PushElementAt(ref.size());
+ Load(property->obj());
+ Load(property->key());
+ Result function = EmitKeyedLoad(false);
+ frame_->Drop(); // Key.
+ Result receiver = frame_->Pop();
+ frame_->Push(&function);
+ frame_->Push(&receiver);
}
// Call the function.
@@ -5183,6 +5218,26 @@
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kBitFieldOffset));
+ __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
+ obj.Unuse();
+ temp.Unuse();
+ destination()->Split(not_zero);
+}
+
+
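
The generated code reads one byte of the map and tests a single bit; the
same check in C++ terms, as a sketch (assuming Map::bit_field() exposes the
byte at Map::kBitFieldOffset, as elsewhere in V8):

  static bool IsUndetectable(Object* obj) {
    if (obj->IsSmi()) return false;            // smis are never undetectable
    Map* map = HeapObject::cast(obj)->map();   // HeapObject::kMapOffset
    return (map->bit_field() & (1 << Map::kIsUndetectable)) != 0;
  }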
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -5760,7 +5815,9 @@
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
- { Reference target(this, node->expression());
+ // A constant reference is not saved to, so a constant reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -6363,6 +6420,114 @@
}
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ // Inline array load code if inside of a loop. We do not know
+ // the receiver map yet, so we initially generate the code with
+ // a check against an invalid map. In the inline cache code, we
+ // patch the map check if appropriate.
+ if (loop_nesting() > 0) {
+ Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+ Result key = frame_->Pop();
+ Result receiver = frame_->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ deferred->Branch(not_equal);
+
+ // Check that the key is a smi.
+ __ test(key.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ mov(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ mov(index.reg(), key.reg());
+ __ SmiUntag(index.reg());
+ __ cmp(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+ // one is eax, then we can reuse that one because the value
+ // coming from the deferred code will be in eax.
+ Result value = index;
+ __ mov(value.reg(), Operand(elements.reg(),
+ index.reg(),
+ times_4,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ frame_->Push(&receiver);
+ frame_->Push(&key);
+ return value;
+ } else {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = frame_->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ return answer;
+ }
+}
+
+
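
The cmp against Factory::null_value() above is a placeholder: the inline
cache machinery later overwrites the 32-bit immediate at
deferred->patch_site() with the receiver map it actually observes, so the
inlined load becomes monomorphic without regenerating code. A sketch of the
patching idea (the helper and offset are hypothetical, not this patch's code):

  // Overwrite the immediate of "cmp [receiver + HeapObject::kMapOffset], imm32"
  // once a real map has been seen.  kOffsetToMapImmediate is illustrative.
  static void PatchInlinedMapCheck(Address patch_site, Object* map) {
    Address immediate = patch_site + kOffsetToMapImmediate;
    *reinterpret_cast<Object**>(immediate) = map;
  }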
#undef __
#define __ ACCESS_MASM(masm)
@@ -6475,121 +6640,21 @@
}
case KEYED: {
- Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
-
- // Inline array load code if inside of a loop. We do not know
- // the receiver map yet, so we initially generate the code with
- // a check against an invalid map. In the inline cache code, we
- // patch the map check if appropriate.
- if (cgen_->loop_nesting() > 0) {
- Comment cmnt(masm, "[ Inlined load from keyed Property");
-
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
- key.ToRegister();
- receiver.ToRegister();
-
- // Use a fresh temporary to load the elements without destroying
- // the receiver which is needed for the deferred slow case.
- Result elements = cgen_->allocator()->Allocate();
- ASSERT(elements.is_valid());
-
- // Use a fresh temporary for the index and later the loaded
- // value.
- Result index = cgen_->allocator()->Allocate();
- ASSERT(index.is_valid());
-
- DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(index.reg(),
- receiver.reg(),
- key.reg(),
- is_global);
-
- // Check that the receiver is not a smi (only needed if this
- // is not a load from the global context) and that it has the
- // expected map.
- if (!is_global) {
- __ test(receiver.reg(), Immediate(kSmiTagMask));
- deferred->Branch(zero);
- }
-
- // Initially, use an invalid map. The map is patched in the IC
- // initialization code.
- __ bind(deferred->patch_site());
- // Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching.
- masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
- Immediate(Factory::null_value()));
- deferred->Branch(not_equal);
-
- // Check that the key is a smi.
- __ test(key.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
-
- // Get the elements array from the receiver and check that it
- // is not a dictionary.
- __ mov(elements.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
- __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- deferred->Branch(not_equal);
-
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ mov(index.reg(), key.reg());
- __ SmiUntag(index.reg());
- __ cmp(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
- deferred->Branch(above_equal);
-
- // Load and check that the result is not the hole. We could
- // reuse the index or elements register for the value.
- //
- // TODO(206): Consider whether it makes sense to try some
- // heuristic about which register to reuse. For example, if
- // one is eax, the we can reuse that one because the value
- // coming from the deferred code will be in eax.
- Result value = index;
- __ mov(value.reg(), Operand(elements.reg(),
- index.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
- index.Unuse();
- __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
- deferred->Branch(equal);
- __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
- deferred->BindExit();
- // Restore the receiver and key to the frame and push the
- // result on top of it.
- cgen_->frame()->Push(&receiver);
- cgen_->frame()->Push(&key);
- cgen_->frame()->Push(&value);
-
- } else {
- Comment cmnt(masm, "[ Load from keyed Property");
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
- }
+ Result value = cgen_->EmitKeyedLoad(is_global);
+ cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -6623,6 +6688,9 @@
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
+
+ ASSERT(persist_after_get_);
+ // Do not unload the reference, because it is used in SetValue.
}
@@ -6752,6 +6820,7 @@
default:
UNREACHABLE();
}
+ cgen_->UnloadReference(this);
}
@@ -7062,143 +7131,335 @@
}
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right) {
+ if (ArgsInRegistersSupported()) {
+ SetArgsInRegisters();
+ return frame->CallStub(this, left, right);
+ } else {
+ frame->Push(left);
+ frame->Push(right);
+ return frame->CallStub(this, 2);
+ }
+}
+
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
- // Perform fast-case smi code for the operation (eax <op> ebx) and
- // leave result in register eax.
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op_ == Token::DIV || op_ == Token::MOD) {
+ left = eax;
+ right = ebx;
+ if (HasArgsInRegisters()) {
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+ }
+ if (!HasArgsInRegisters()) {
+ __ mov(right, Operand(esp, 1 * kPointerSize));
+ __ mov(left, Operand(esp, 2 * kPointerSize));
+ }
- // Prepare the smi check of both operands by or'ing them together
- // before checking against the smi mask.
- __ mov(ecx, Operand(ebx));
- __ or_(ecx, Operand(eax));
-
+ // 2. Prepare the smi check of both operands by oring them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
switch (op_) {
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left)); // Bitwise or is commutative.
+ combined = right;
+ break;
+
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
case Token::ADD:
- __ add(eax, Operand(ebx)); // add optimistically
- __ j(overflow, slow, not_taken);
- break;
-
case Token::SUB:
- __ sub(eax, Operand(ebx)); // subtract optimistically
- __ j(overflow, slow, not_taken);
- break;
-
+ case Token::MUL:
case Token::DIV:
case Token::MOD:
- // Sign extend eax into edx:eax.
- __ cdq();
- // Check for 0 divisor.
- __ test(ebx, Operand(ebx));
- __ j(zero, slow, not_taken);
+ __ mov(combined, right);
+ __ or_(combined, Operand(left));
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation, use eax
+ // for the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, Operand(left));
+ combined = right;
break;
default:
- // Fall-through to smi check.
break;
}
- // Perform the actual smi check.
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, slow, not_taken);
+ // 3. Perform the smi check of the operands.
+ ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ test(combined, Immediate(kSmiTagMask));
+ __ j(not_zero, &not_smis, not_taken);
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
switch (op_) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, Operand(left)); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when smi
+ // tagging. These two cases can only happen with shifts by 0 or 1
+ // when handed a valid smi.
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, slow, not_taken);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, Operand(left)); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ break;
+
case Token::SUB:
- // Do nothing here.
+ __ sub(left, Operand(right));
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ __ mov(eax, left);
break;
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // adjust code below if not the case
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi
+ // so save the right operand.
+ __ mov(ebx, right);
// Remove tag from one of the operands (but keep sign).
- __ SmiUntag(eax);
+ __ SmiUntag(right);
// Do multiplication.
- __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
- // Go slow on overflows.
- __ j(overflow, slow, not_taken);
- // Check for negative zero result.
- __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
+ __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis, not_taken);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
break;
case Token::DIV:
- // Divide edx:eax by ebx.
- __ idiv(ebx);
- // Check for the corner case of dividing the most negative smi
- // by -1. We cannot use the overflow flag, since it is not set
- // by idiv instruction.
+ // We can't revert the division if the result is not a smi so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &use_fp_on_smis, not_taken);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by idiv
+ // instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
- __ j(equal, slow);
- // Check for negative zero result.
- __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y
+ __ j(equal, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
// Check that the remainder is zero.
__ test(edx, Operand(edx));
- __ j(not_zero, slow);
+ __ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(eax);
break;
case Token::MOD:
- // Divide edx:eax by ebx.
- __ idiv(ebx);
- // Check for negative zero result.
- __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y
+ // Check for 0 divisor.
+ __ test(right, Operand(right));
+ __ j(zero, &not_smis, not_taken);
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
// Move remainder to register eax.
- __ mov(eax, Operand(edx));
- break;
-
- case Token::BIT_OR:
- __ or_(eax, Operand(ebx));
- break;
-
- case Token::BIT_AND:
- __ and_(eax, Operand(ebx));
- break;
-
- case Token::BIT_XOR:
- __ xor_(eax, Operand(ebx));
- break;
-
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- // Move the second operand into register ecx.
- __ mov(ecx, Operand(ebx));
- // Remove tags from operands (but keep sign).
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- // Perform the operation.
- switch (op_) {
- case Token::SAR:
- __ sar_cl(eax);
- // No checks of result necessary
- break;
- case Token::SHR:
- __ shr_cl(eax);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
- break;
- case Token::SHL:
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(sign, slow, not_taken);
- break;
- default:
- UNREACHABLE();
- }
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
+ __ mov(eax, edx);
break;
default:
UNREACHABLE();
+ }
+
+ // 5. Emit return of result in eax.
+ GenerateReturn(masm);
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ switch (op_) {
+ case Token::SHL: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(left));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ // It's OK to overwrite the right argument on the stack because we
+ // are about to return.
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ GenerateReturn(masm);
+ break;
+ }
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
+ switch (op_) {
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, Operand(left));
+ break;
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, Operand(right));
+ break;
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
+ break;
+ }
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ GenerateReturn(masm);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op_) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
+ break;
+
+ default:
break;
}
}
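
The 0xc0000000 tests above are the smi range checks, assuming kSmiTag == 0
and kSmiTagSize == 1: a result r can be re-tagged only if r << 1 loses no
information, i.e. r is in [-2^30, 2^30) for signed results and [0, 2^30) for
SHR's unsigned result. Equivalent checks in plain C++, as a sketch:

  // Signed (SHL): "cmp r, 0xc0000000; j sign" adds 2^30 mod 2^32 and tests
  // the sign bit, which is clear exactly for r in [-2^30, 2^30).
  inline bool FitsInSignedSmi(int32_t r) {
    return r >= -(1 << 30) && r < (1 << 30);
  }
  // Unsigned (SHR): "test r, 0xc0000000; j not_zero" requires both of the
  // two high-order bits to be clear, i.e. r in [0, 2^30).
  inline bool FitsInUnsignedSmi(uint32_t r) {
    return (r & 0xc0000000u) == 0;
  }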
@@ -7213,30 +7474,20 @@
// case smi code is not generated by the caller. Generating it here will speed
// up common operations.
if (HasSmiCodeInStub()) {
- Label slow;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- GenerateSmiCode(masm, &slow);
- GenerateReturn(masm);
- // Too bad. The fast case smi code didn't succeed.
- __ bind(&slow);
+ GenerateSmiCode(masm, &call_runtime);
+ } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
+ GenerateLoadArguments(masm);
}
- // Make sure the arguments are in edx and eax.
- GenerateLoadArguments(masm);
-
// Floating point case.
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
- // eax: y
- // edx: x
-
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+ FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7245,59 +7496,15 @@
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE: {
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- }
- default: UNREACHABLE();
- }
+ GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
@@ -7305,8 +7512,13 @@
case Token::DIV: __ fdivp(1); break;
default: UNREACHABLE();
}
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
GenerateReturn(masm);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
}
}
case Token::MOD: {
@@ -7319,12 +7531,8 @@
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label non_smi_result, skip_allocation;
- Label operand_conversion_failure;
- FloatingPointHelper::LoadAsIntegers(
- masm,
- use_sse3_,
- &operand_conversion_failure);
+ Label non_smi_result;
+ FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7337,7 +7545,7 @@
if (op_ == Token::SHR) {
// Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &non_smi_result);
+ __ j(not_zero, &call_runtime);
} else {
// Check if result fits in a smi.
__ cmp(eax, 0xc0000000);
@@ -7352,6 +7560,7 @@
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result
+ Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
@@ -7380,15 +7589,6 @@
}
GenerateReturn(masm);
}
-
- // Go to runtime for non-number inputs.
- __ bind(&operand_conversion_failure);
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_result);
- }
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
break;
}
default: UNREACHABLE(); break;
@@ -7398,9 +7598,9 @@
// result. If arguments were passed in registers, place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
- if (HasArgumentsInRegisters()) {
+ if (HasArgsInRegisters()) {
__ pop(ecx);
- if (HasArgumentsReversed()) {
+ if (HasArgsReversed()) {
__ push(eax);
__ push(edx);
} else {
@@ -7414,17 +7614,15 @@
// Test for string arguments before calling runtime.
Label not_strings, not_string1, string1;
Result answer;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- __ test(eax, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
__ j(zero, ¬_string1);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, ¬_string1);
- // First argument is a a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
+ // First argument is a string, test second.
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &string1);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &string1);
// First and second argument are strings. Jump to the string add stub.
@@ -7433,17 +7631,25 @@
// Only first argument is a string.
__ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_RIGHT :
+ Builtins::STRING_ADD_LEFT,
+ JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(¬_string1);
- __ test(edx, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, ¬_strings);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, ¬_strings);
// Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ __ InvokeBuiltin(
+ HasArgsReversed() ?
+ Builtins::STRING_ADD_LEFT :
+ Builtins::STRING_ADD_RIGHT,
+ JUMP_FUNCTION);
__ bind(¬_strings);
// Neither argument is a string.
@@ -7455,7 +7661,7 @@
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
+ break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
@@ -7486,9 +7692,57 @@
}
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
+ Label skip_allocation;
+ OverwriteMode mode = mode_;
+ if (HasArgsReversed()) {
+ if (mode == OVERWRITE_RIGHT) {
+ mode = OVERWRITE_LEFT;
+ } else if (mode == OVERWRITE_LEFT) {
+ mode = OVERWRITE_RIGHT;
+ }
+ }
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(edx, Operand(ebx));
+ __ bind(&skip_allocation);
+ // Use the object in edx as the result holder.
+ __ mov(eax, Operand(edx));
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten losing one of the arguments as we are
+ // now done and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
+
+
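
The mode swap at the top of GenerateHeapResultAllocation is the subtle part: once the stub has commuted its operands, "left" and "right" have traded registers, so the overwrite target must trade with them. A minimal C++ sketch of that decision, using the OverwriteMode enum from this stub:

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // Sketch of the adjustment above: with reversed args, overwriting the
    // "right" operand means overwriting the register that held "left".
    OverwriteMode AdjustForReversedArgs(OverwriteMode mode, bool args_reversed) {
      if (!args_reversed) return mode;
      if (mode == OVERWRITE_RIGHT) return OVERWRITE_LEFT;
      if (mode == OVERWRITE_LEFT) return OVERWRITE_RIGHT;
      return mode;  // NO_OVERWRITE needs no adjustment.
    }
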
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
}
@@ -7498,7 +7752,7 @@
void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
// If arguments are not passed in registers remove them from the stack before
// returning.
- if (!HasArgumentsInRegisters()) {
+ if (!HasArgsInRegisters()) {
__ ret(2 * kPointerSize); // Remove both operands
} else {
__ ret(0);
@@ -7514,6 +7768,7 @@
Register source,
bool use_sse3,
Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
@@ -7716,7 +7971,7 @@
}
-void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
@@ -7748,16 +8003,40 @@
}
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm0, Operand(scratch));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
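
For readers new to the tagging scheme: an ia32 smi carries a 31-bit integer shifted left by one with a zero tag bit, so the SmiUntag above is a single arithmetic right shift. A sketch of the representation:

    #include <cstdint>

    // ia32 smi tagging (kSmiTag == 0, kSmiTagSize == 1): the payload lives in
    // the upper 31 bits, so untagging is an arithmetic shift right by one.
    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }
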
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch) {
+ Register scratch,
+ ArgLocation arg_location) {
Label load_smi_1, load_smi_2, done_load_1, done;
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_1, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
__ test(scratch, Immediate(kSmiTagMask));
__ j(zero, &load_smi_2, not_taken);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
@@ -7780,6 +8059,24 @@
}
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
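
LoadFloatSmis stages each untagged value through a single stack slot because fild_s only accepts a memory operand; the slot is pushed once, overwritten for the second operand, and popped once. In plain C++ the helper reduces to two conversions (a sketch, not the stub itself):

    #include <cstdint>

    // What LoadFloatSmis computes: both tagged smi operands become doubles
    // (the stub leaves them on the x87 stack; here they are returned).
    void LoadFloatSmisSketch(int32_t left_smi, int32_t right_smi,
                             double* left, double* right) {
      *left = static_cast<double>(left_smi >> 1);   // untag, then convert
      *right = static_cast<double>(right_smi >> 1);
    }
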
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -8114,10 +8411,24 @@
// esp[12]: subject string
// esp[16]: JSRegExp object
- Label runtime;
+ static const int kLastMatchInfoOffset = 1 * kPointerSize;
+ static const int kPreviousIndexOffset = 2 * kPointerSize;
+ static const int kSubjectOffset = 3 * kPointerSize;
+ static const int kJSRegExpOffset = 4 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &runtime, not_taken);
// Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
ASSERT_EQ(0, kSmiTag);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
@@ -8153,7 +8464,7 @@
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the second argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -8165,7 +8476,7 @@
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the third argument is a positive smi.
- __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
__ j(not_zero, &runtime);
// Check that it is not greater than the subject string length.
@@ -8176,7 +8487,7 @@
// ecx: RegExp data (FixedArray)
// edx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
@@ -8194,38 +8505,74 @@
__ j(greater, &runtime);
// ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string (only support
- // flat ascii strings).
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ // Check the representation and encoding of the subject string.
+ Label seq_string, seq_two_byte_string, check_code;
+ const int kStringRepresentationEncodingMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ and_(ebx, kStringRepresentationEncodingMask);
+ // First check for sequential string.
+ ASSERT_EQ(0, kStringTag);
+ ASSERT_EQ(0, kSeqStringTag);
+ __ test(Operand(ebx),
+ Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ j(zero, &seq_string);
+
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string.
+ __ mov(edx, ebx);
+ __ and_(edx, kStringRepresentationMask);
+ __ cmp(edx, kConsStringTag);
__ j(not_equal, &runtime);
+ __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+ __ cmp(Operand(edx), Immediate(Handle<String>(Heap::empty_string())));
+ __ j(not_equal, &runtime);
+ __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ and_(ebx, kStringRepresentationEncodingMask);
- // ecx: RegExp data (FixedArray)
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address();
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size();
- __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(eax, Operand(eax));
- __ j(zero, &runtime, not_taken);
-
+ __ bind(&seq_string);
+ // eax: subject string (sequential, either ascii or two byte)
+ // ebx: subject string type & kStringRepresentationEncodingMask
// ecx: RegExp data (FixedArray)
// Check that the irregexp code has been generated for an ascii string. If
- // it has the field contains a code object otherwise it contains the hole.
+ // it has, the field contains a code object; otherwise it contains the hole.
+ __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ __ j(equal, &seq_two_byte_string);
+#ifdef DEBUG
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ Check(equal, "Expected sequential ascii string");
+#endif
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(edi, Immediate(1)); // Type is ascii.
+ __ jmp(&check_code);
+
+ __ bind(&seq_two_byte_string);
+ // eax: subject string
+ // ecx: RegExp data (FixedArray)
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(edi, Immediate(0)); // Type is two byte.
+
+ __ bind(&check_code);
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
__ CmpObjectType(edx, CODE_TYPE, ebx);
__ j(not_equal, &runtime);
+ // eax: subject string
+ // edx: code
+ // edi: encoding of subject string (1 if ascii, 0 if two_byte)
// Load used arguments before starting to push arguments for call to native
// RegExp code, to avoid dealing with a changing stack height.
- __ mov(eax, Operand(esp, 3 * kPointerSize)); // Subject string.
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // Previous index.
- __ mov(ecx, Operand(esp, 4 * kPointerSize)); // JSRegExp object.
- __ SmiUntag(ebx); // Previous index from sim.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ mov(ecx, Operand(esp, kJSRegExpOffset));
+ __ SmiUntag(ebx); // Previous index from smi.
// eax: subject string
// ebx: previous index
@@ -8233,37 +8580,40 @@
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
- // Argument 8: Indicate that this is a direct call from JavaScript.
+ // Argument 7: Indicate that this is a direct call from JavaScript.
__ push(Immediate(1));
- // Argument 7: Start (high end) of backtracking stack memory area.
+ // Argument 6: Start (high end) of backtracking stack memory area.
__ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ push(ecx);
- // Argument 6: At start of string?
- __ xor_(Operand(ecx), ecx); // setcc only operated on cl (lower byte of ecx).
- __ test(ebx, Operand(ebx));
- __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
- __ push(ecx);
-
// Argument 5: static offsets vector buffer.
__ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
- // Argument 4: End of string data.
- __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
- __ add(ecx, Operand(eax));
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ push(ecx);
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ Label push_two_byte, push_rest;
+ __ test(edi, Operand(edi));
+ __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+ __ j(zero, &push_two_byte);
+ __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+ __ push(ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ push(ecx); // Argument 3.
+ __ jmp(&push_rest);
- // Argument 3: Start of string data.
- __ mov(ecx, ebx);
- __ add(ebx, Operand(eax)); // String is ASCII.
- __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ push(ebx);
+ __ bind(&push_two_byte);
+ ASSERT(kShortSize == 2);
+ __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
+ __ push(ecx); // Argument 4.
+ __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+ __ push(ecx); // Argument 3.
+
+ __ bind(&push_rest);
// Argument 2: Previous index.
- __ push(ecx);
+ __ push(ebx);
// Argument 1: Subject string.
__ push(eax);
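
The two lea blocks above compute the start and end of the character data: the tagged base pointer minus the heap-object tag, plus the sequential-string header, plus index times the character width (times_1 for ascii, times_2 for two-byte). A sketch of the address arithmetic, assuming kHeapObjectTag == 1:

    // Address of character `index` in a sequential string (sketch; header_size
    // is SeqAsciiString::kHeaderSize or SeqTwoByteString::kHeaderSize).
    char* StringCharAddress(char* tagged_ptr, int header_size,
                            int index, int char_size) {
      return tagged_ptr - 1 /* kHeapObjectTag, assumed 1 */
             + header_size + index * char_size;
    }
    // Argument 3 uses index = previous_index; argument 4 uses index = length.
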
@@ -8272,7 +8622,7 @@
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(Operand(edx));
// Remove arguments.
- __ add(Operand(esp), Immediate(8 * kPointerSize));
+ __ add(Operand(esp), Immediate(7 * kPointerSize));
// Check the result.
Label success;
@@ -8299,7 +8649,7 @@
// Load RegExp data.
__ bind(&success);
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, kJSRegExpOffset));
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
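
The formula in the comment is worth spelling out: each capture needs a start and an end offset, and the whole match itself counts as capture 0. As a one-liner:

    // (number_of_captures + 1) * 2: one start and one end slot per capture,
    // plus the implicit capture 0 for the whole match.
    int NumCaptureRegisters(int number_of_captures) {
      return (number_of_captures + 1) * 2;
    }
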
@@ -8307,7 +8657,7 @@
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
// ebx: last_match_info backing store (FixedArray)
@@ -8317,11 +8667,11 @@
__ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, kSubjectOffset));
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ mov(ecx, ebx);
__ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
@@ -8335,7 +8685,7 @@
// ecx: offsets vector
// edx: number of capture registers
Label next_capture, done;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // Read previous index.
+ __ mov(eax, Operand(esp, kPreviousIndexOffset));
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
@@ -8362,7 +8712,7 @@
__ bind(&done);
// Return last match info.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
// Do the runtime call to execute the regexp.
@@ -8520,7 +8870,7 @@
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
- FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+ FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
// Jump to builtin for NaN.
@@ -8582,30 +8932,7 @@
__ bind(&check_for_strings);
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- __ mov(ebx, Operand(edx));
- __ and_(ebx, Operand(eax));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_builtin);
-
- // Load instance type for both objects.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- Label non_ascii_flat;
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiString =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ and_(ecx, kFlatAsciiString);
- __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &call_builtin);
- __ and_(ebx, kFlatAsciiString);
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &call_builtin);
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
// Inline comparison of ascii strings.
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9659,79 +9986,76 @@
Register scratch1,
Register scratch2,
Register scratch3) {
- Label compare_lengths, compare_lengths_1;
-
- // Find minimum length. If either length is zero just compare lengths.
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+ // Find minimum length.
+ Label left_shorter;
__ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ test(scratch1, Operand(scratch1));
- __ j(zero, &compare_lengths_1);
- __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
- __ test(scratch2, Operand(scratch2));
- __ j(zero, &compare_lengths_1);
- __ cmp(scratch1, Operand(scratch2));
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(greater, scratch1, Operand(scratch2));
- } else {
- Label l;
- __ j(less, &l);
- __ mov(scratch1, scratch2);
- __ bind(&l);
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change index to run from -min_length to -1 by adding min_length to the
+ // string start. This means that the loop ends when the index reaches zero,
+ // so no additional compare is needed.
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
}
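
The loop above is the classic negative-index idiom: both string pointers are advanced past the compared prefix, the index runs from -min_length toward zero, and the zero flag from the increment doubles as the loop condition, saving one compare per character. A C++ sketch under those assumptions:

    // Compare min_length characters given pointers just past the prefix.
    // Returns <0, 0, >0 like the assembly's LESS/EQUAL/GREATER outcomes.
    int ComparePrefix(const unsigned char* left_end,
                      const unsigned char* right_end, int min_length) {
      for (int index = -min_length; index != 0; index++) {
        if (left_end[index] != right_end[index]) {
          return left_end[index] < right_end[index] ? -1 : 1;
        }
      }
      return 0;  // Equal up to min_length; the length delta decides, as below.
    }
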
- Label result_greater, result_less;
- Label loop;
- // Compare next character.
- __ mov(scratch3, Immediate(-1)); // Index into strings.
- __ bind(&loop);
- // Compare characters.
- Label character_compare_done;
- __ add(Operand(scratch3), Immediate(1));
- __ mov_b(scratch2, Operand(left,
- scratch3,
- times_1,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ subb(scratch2, Operand(right,
- scratch3,
- times_1,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ j(not_equal, &character_compare_done);
- __ sub(Operand(scratch1), Immediate(1));
- __ j(not_zero, &loop);
- // If min length characters match compare lengths otherwise last character
- // compare is the result.
- __ bind(&character_compare_done);
- __ j(equal, &compare_lengths);
- __ j(less, &result_less);
- __ jmp(&result_greater);
-
- // Compare lengths.
- Label result_not_equal;
+ // Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ bind(&compare_lengths_1);
- __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
+ __ test(length_delta, Operand(length_delta));
__ j(not_zero, &result_not_equal);
// Result is EQUAL.
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ xor_(eax, Operand(eax));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(2 * kPointerSize);
+
__ bind(&result_not_equal);
__ j(greater, &result_greater);
// Result is LESS.
- __ bind(&result_less);
- __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
__ ret(2 * kPointerSize);
// Result is GREATER.
__ bind(&result_greater);
- __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
- __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
__ ret(2 * kPointerSize);
}
@@ -9752,41 +10076,19 @@
__ j(not_equal, ¬_same);
ASSERT_EQ(0, EQUAL);
ASSERT_EQ(0, kSmiTag);
- __ xor_(eax, Operand(eax));
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
__ bind(¬_same);
- // Check that both objects are not smis.
- ASSERT_EQ(0, kSmiTag);
- __ mov(ebx, Operand(edx));
- __ and_(ebx, Operand(eax));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
-
- // Load instance type for both strings.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // Check that both are flat ascii strings.
- Label non_ascii_flat;
- __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
- __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
- const int kFlatAsciiString =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- __ and_(ebx, kFlatAsciiString);
- __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
- __ j(not_equal, &non_ascii_flat);
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat ascii strings.
+ __ IncrementCounter(&Counters::string_compare_native, 1);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
- __ bind(&non_ascii_flat);
-
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 56cf978..a81a7d1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,70 @@
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
- // being read from again. Thae value of the reference may be invalidated,
+ // being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ // Keep the reference on the stack after get, so it can be used by set later.
+ bool persist_after_get_;
};
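
A sketch of the lifetime these comments describe (a hypothetical call site, not code from this patch): a persisting reference survives GetValue so a later SetValue can reuse it, and the destructor's assert enforces that one of the consuming calls ran.

    // Hypothetical compound-assignment shape; NOT_CONST_INIT per InitState.
    void EmitCompoundAssignment(CodeGenerator* cgen, Expression* target) {
      Reference ref(cgen, target, true);  // persist_after_get = true
      ref.GetValue();          // reference kept on the frame, under the value
      // ... combine with the right-hand side ...
      ref.SetValue(NOT_CONST_INIT);  // consumes the reference -> UNLOADED
    }  // ~Reference() asserts is_unloaded() || is_illegal()
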
@@ -420,6 +433,11 @@
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a property of an object, returning it in a Result.
+ // The object and the property name are passed on the stack, and
+ // not changed.
+ Result EmitKeyedLoad(bool is_global);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -444,20 +462,20 @@
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
- void ConstantSmiBinaryOperation(Token::Value op,
- Result* operand,
- Handle<Object> constant_operand,
- StaticType* type,
- bool reversed,
- OverwriteMode overwrite_mode);
+ Result ConstantSmiBinaryOperation(Token::Value op,
+ Result* operand,
+ Handle<Object> constant_operand,
+ StaticType* type,
+ bool reversed,
+ OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
- void LikelySmiBinaryOperation(Token::Value op,
- Result* left,
- Result* right,
- OverwriteMode overwrite_mode);
+ Result LikelySmiBinaryOperation(Token::Value op,
+ Result* left,
+ Result* right,
+ OverwriteMode overwrite_mode);
void Comparison(AstNode* node,
Condition cc,
@@ -479,10 +497,10 @@
CallFunctionFlags flags,
int position);
- // Use an optimized version of Function.prototype.apply that avoid
- // allocating the arguments object and just copies the arguments
- // from the stack.
- void CallApplyLazy(Property* apply,
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
@@ -517,6 +535,7 @@
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -613,8 +632,8 @@
friend class JumpTarget;
friend class Reference;
friend class Result;
- friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
@@ -651,6 +670,11 @@
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+ Result GenerateCall(MacroAssembler* masm,
+ VirtualFrame* frame,
+ Result* left,
+ Result* right);
+
private:
Token::Value op_;
OverwriteMode mode_;
@@ -697,11 +721,11 @@
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
bool ArgsInRegistersSupported() {
- return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -710,8 +734,8 @@
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
- bool HasArgumentsInRegisters() { return args_in_registers_; }
- bool HasArgumentsReversed() { return args_reversed_; }
+ bool HasArgsInRegisters() { return args_in_registers_; }
+ bool HasArgsReversed() { return args_reversed_; }
};
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 1fbaa3c..cb500d5 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -53,23 +53,25 @@
static ByteMnemonic two_operands_instr[] = {
{0x03, "add", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
{0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
{0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
similarity index 90%
rename from src/ia32/fast-codegen-ia32.cc
rename to src/ia32/full-codegen-ia32.cc
index f485d9e..03fe54d 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
#include "parser.h"
namespace v8 {
@@ -51,7 +51,7 @@
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
SetFunctionPosition(fun);
@@ -160,7 +160,7 @@
}
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -193,7 +193,7 @@
}
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -236,7 +236,7 @@
}
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -279,7 +279,7 @@
}
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -320,7 +320,7 @@
}
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
@@ -361,7 +361,7 @@
}
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
Expression::Context context,
Register reg) {
ASSERT(count > 0);
@@ -413,7 +413,7 @@
}
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false) {
switch (context) {
@@ -478,7 +478,7 @@
}
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
// The value to test is in the accumulator. If the value might be needed
// on the stack (value/test and test/value contexts with a stack location
// desired), then the value is already duplicated on the stack.
@@ -612,7 +612,7 @@
}
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
@@ -631,13 +631,13 @@
}
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
MemOperand location = EmitSlotSearch(source, destination);
__ mov(destination, location);
}
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
@@ -653,7 +653,7 @@
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
@@ -751,7 +751,7 @@
}
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
__ push(Immediate(pairs));
@@ -761,7 +761,7 @@
}
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -779,17 +779,21 @@
}
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), context_);
}
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
@@ -803,34 +807,24 @@
// (eg, push/pop elimination).
__ nop();
DropAndApply(1, context, eax);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
- Apply(context, slot);
- } else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
- ASSERT_NOT_NULL(property);
- // The only property expressions that can occur are of the form
- // "slot[literal]".
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, eax);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ Apply(context, slot);
+
+ } else {
+ Comment cmnt(masm_, "Rewritten parameter");
+ ASSERT_NOT_NULL(property);
+ // Rewritten parameter accesses are of the form "slot[literal]".
// Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -862,7 +856,7 @@
}
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
@@ -889,7 +883,7 @@
}
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -958,7 +952,7 @@
}
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
@@ -1008,7 +1002,7 @@
}
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
@@ -1018,7 +1012,7 @@
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -1026,7 +1020,7 @@
}
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ push(result_register());
GenericBinaryOpStub stub(op,
@@ -1037,11 +1031,17 @@
}
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Expression::Context context) {
+ // Three main cases: global variables, lookup slots, and all other
+ // types of slots. Left-hand-side parameters that rewrite to
+ // explicit property accesses do not reach here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
+
+ Slot* slot = var->slot();
if (var->is_global()) {
+ ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
// ecx, and the global object on the stack.
@@ -1053,8 +1053,14 @@
// Overwrite the receiver on the stack with the result if needed.
DropAndApply(1, context, eax);
- } else if (var->slot() != NULL) {
- Slot* slot = var->slot();
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ push(esi); // Context.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, eax);
+
+ } else if (slot != NULL) {
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
@@ -1086,7 +1092,7 @@
}
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
@@ -1121,7 +1127,7 @@
}
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// If the assignment starts a block of assignments to the same object,
@@ -1157,7 +1163,7 @@
}
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
@@ -1177,7 +1183,7 @@
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
@@ -1198,7 +1204,7 @@
}
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1215,7 +1221,7 @@
}
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1269,12 +1275,12 @@
} else {
// Call to some other expression. If the expression is an anonymous
// function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
+ // also use the full code generator.
FunctionLiteral* lit = fun->AsFunctionLiteral();
if (lit != NULL &&
lit->name()->Equals(Heap::empty_string()) &&
loop_depth() == 0) {
- lit->set_try_fast_codegen(true);
+ lit->set_try_full_codegen(true);
}
VisitForValue(fun, kStack);
// Load global receiver object.
@@ -1286,7 +1292,7 @@
}
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
@@ -1321,7 +1327,7 @@
}
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
@@ -1353,7 +1359,7 @@
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1457,13 +1463,26 @@
break;
}
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ __ push(result_register());
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
}
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
// Expression can only be a property, a global or a (parameter or local)
@@ -1482,7 +1501,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
Location saved_location = location_;
- location_ = kStack;
+ location_ = kAccumulator;
EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
Expression::kValue);
location_ = saved_location;
@@ -1498,11 +1517,15 @@
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
- __ push(eax);
}
- // Convert to number.
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &no_conversion);
+ __ push(eax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+ __ bind(&no_conversion);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -1534,13 +1557,33 @@
}
}
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ if (expr->op() == Token::INC) {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ __ j(overflow, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ if (expr->op() == Token::INC) {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ }
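
The inline path leans on two facts: Smi::FromInt(1) is the tagged constant 2, and tagged smis add like ordinary integers because the tag bit is zero. Overflow falls back to the stub, and the extra smi check guards the case where TO_NUMBER produced a heap number. A C++ model of the fast path (a sketch, not the stub):

    #include <cstdint>

    // Returns true and writes the result if the inline increment succeeds;
    // false means the caller takes the stub path (after undoing the add).
    bool TrySmiIncrement(int32_t tagged, int32_t* result) {
      int64_t sum = static_cast<int64_t>(tagged) + 2;  // + Smi::FromInt(1)
      if (sum != static_cast<int32_t>(sum)) return false;  // j(overflow, ...)
      if ((sum & 1) != 0) return false;  // operand was not a smi after all
      *result = static_cast<int32_t>(sum);
      return true;
    }
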
// Call stub for +1/-1.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(1)));
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
+ stub.GenerateCall(masm(), eax, Smi::FromInt(1));
+ __ bind(&done);
// Store the value returned in eax.
switch (assign_type) {
@@ -1595,7 +1638,7 @@
}
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
@@ -1630,7 +1673,7 @@
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
// Always perform the comparison for its control flow. Pack the result
@@ -1745,25 +1788,25 @@
}
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Apply(context_, eax);
}
-Register FastCodeGenerator::result_register() { return eax; }
+Register FullCodeGenerator::result_register() { return eax; }
-Register FastCodeGenerator::context_register() { return esi; }
+Register FullCodeGenerator::context_register() { return esi; }
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
}
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
__ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
}
@@ -1771,7 +1814,7 @@
// ----------------------------------------------------------------------------
// Non-local control flow support.
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ mov(edx, Operand(esp, 0));
@@ -1785,7 +1828,7 @@
}
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore result register from stack.
__ pop(result_register());
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 5658605..ebc2cfa9 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -244,11 +244,10 @@
// Get the map of the receiver.
__ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
+
+ // Check bit field.
__ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ test(ebx, Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow, not_taken);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d7c7d3a..a16c103 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1454,6 +1454,36 @@
}
+void MacroAssembler::IncrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ IncrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(Condition cc,
+ StatsCounter* counter,
+ int value) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Label skip;
+ j(NegateCondition(cc), &skip);
+ pushfd();
+ DecrementCounter(counter, value);
+ popfd();
+ bind(&skip);
+ }
+}
+
+
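
The pushfd/popfd pair is the point of these helpers: the counter update is arithmetic on the counter cell and so modifies EFLAGS, which would clobber the very condition being tested (and any jump after the call site). The contract, modeled in C++:

    // Semantic model of IncrementCounter(cc, counter, value): bump the counter
    // only when the condition held, leaving the caller's flags untouched.
    void IncrementCounterIf(bool condition_held, int* counter, int value) {
      if (condition_held) *counter += value;
    }
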
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (FLAG_debug_code) Check(cc, msg);
}
@@ -1495,6 +1525,38 @@
}
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ mov(scratch1, Operand(object1));
+ and_(scratch1, Operand(object2));
+ test(scratch1, Immediate(kSmiTagMask));
+ j(zero, failure);
+
+ // Load instance type for both strings.
+ mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
+ mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
+ movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ // Interleave bits from both instance types and compare them in one check.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ and_(scratch1, kFlatAsciiStringMask);
+ and_(scratch2, kFlatAsciiStringMask);
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+ j(not_equal, failure);
+}
+
+
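
The lea with times_8 packs the second masked instance type three bits above the first (the ASSERT_EQ guarantees the two fields cannot overlap), so a single cmp validates both strings. A sketch of the bit-twiddling:

    // Pack two masked instance types into one word and test both at once.
    bool BothMatchTag(int type1, int type2, int mask, int tag) {
      // Precondition, as asserted above: (mask & (mask << 3)) == 0.
      int combined = (type1 & mask) | ((type2 & mask) << 3);  // the times_8 lea
      return combined == (tag | (tag << 3));
    }
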
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ceecebf..3f000ee 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -392,6 +392,8 @@
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
+ void IncrementCounter(Condition cc, StatsCounter* counter, int value);
+ void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
@@ -413,6 +415,17 @@
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // ---------------------------------------------------------------------------
+ // String utilities.
+
+ // Checks if both objects are sequential ASCII strings, and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_flat_ascii_strings);
+
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 4af59dd..f6da693 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -59,8 +59,6 @@
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
@@ -74,6 +72,8 @@
* - backup of caller ebx
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - Boolean at start (if 1, we are starting at the start of the string,
+ * otherwise 0)
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -625,6 +625,7 @@
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -667,6 +668,15 @@
// Store this value in a local variable, for use when clearing
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
+
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ mov(ebx, Operand(ebp, kStartIndex));
+ __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
+ __ test(ebx, Operand(ebx));
+ __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
+ __ mov(Operand(ebp, kAtStart), ecx);
+
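
The xor is needed because setcc writes only cl, leaving the upper bytes of ecx stale; the sequence computes a clean 0/1 boolean:

    // What the xor/test/setcc above stores at ebp[kAtStart]:
    int AtStart(int start_index) { return start_index == 0 ? 1 : 0; }
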
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 8e7a6a5..d9866b7 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -123,8 +123,7 @@
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
@@ -133,8 +132,9 @@
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 3ebd2e6..94ef7bf 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -53,8 +53,8 @@
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)