Update V8 to r3431 as required by WebKit r51976.
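
Notable changes in src/arm/codegen-arm.cc:
- TypeofState is no longer threaded through CodeGenState, Load and
  LoadCondition; typeof handling is confined to LoadTypeofExpression.
- The JS return sequence blocks constant pool emission and accounts for
  sp deltas that do not fit an addressing mode 1 immediate.
- VFP3 code paths for double comparison, arithmetic and integer-to-double
  conversion on CPUs that support them.
- A generated fast path that validates the for-in enum cache, plus fast
  cases for char-code-at, is-object, is-function and string add.
- InvokeBuiltin callers no longer pass the argument count in r0.
- StackFrame::Type EXIT/EXIT_DEBUG is replaced by ExitFrame::Mode.
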
Change-Id: I567392c3f8c0a0d5201a4249611ac4ccf468cd5b
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 47f0e96..7c0b0c6 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
@@ -92,7 +93,6 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -101,11 +101,9 @@
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target)
: owner_(owner),
- typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -144,6 +142,9 @@
// cp: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
+ // Record the position for debugging purposes.
+ CodeForFunctionPosition(fun);
+
ZoneList<Statement*>* body = fun->body();
// Initialize state.
@@ -322,18 +323,32 @@
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
// Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction; there are restrictions on which immediate values
+ // can be encoded directly, and other immediate values require an
+ // additional instruction to move the immediate into a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
@@ -442,14 +457,13 @@
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
ASSERT(!has_cc());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, true_target, false_target);
+ { CodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -479,13 +493,13 @@
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
JumpTarget true_target;
JumpTarget false_target;
- LoadCondition(x, typeof_state, &true_target, &false_target, false);
+ LoadCondition(expr, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
@@ -552,24 +566,27 @@
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
VirtualFrame::SpilledScope spilled_scope;
- Variable* variable = x->AsVariableProxy()->AsVariable();
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- LoadAndSpill(&property);
+ Reference ref(this, &property);
+ ref.GetValueAndSpill();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
+ frame_->SpillAll();
} else {
- LoadAndSpill(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ LoadAndSpill(expr);
}
}
@@ -1066,27 +1083,6 @@
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
@@ -1122,22 +1118,20 @@
void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope;
- if (FLAG_check_stack) {
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- // Put the lr setup instruction in the delay slot. kInstrSize is added to
- // the implicit 8 byte offset that always applies to operations with pc and
- // gives a return address 12 bytes down.
- masm_->add(lr, pc, Operand(Assembler::kInstrSize));
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ Comment cmnt(masm_, "[ check stack");
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ // Put the lr setup instruction in the delay slot. kInstrSize is added to
+ // the implicit 8 byte offset that always applies to operations with pc and
+ // gives a return address 12 bytes down.
+ masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+ masm_->cmp(sp, Operand(ip));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm_->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
@@ -1299,8 +1293,7 @@
JumpTarget then;
JumpTarget else_;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
@@ -1323,8 +1316,7 @@
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &exit, true);
+ LoadConditionAndSpill(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
@@ -1339,8 +1331,7 @@
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &else_, true);
+ LoadConditionAndSpill(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
@@ -1354,8 +1345,7 @@
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &exit, false);
+ LoadConditionAndSpill(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1570,7 +1560,7 @@
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
- // Compile the test.
+ // Compile the test.
switch (info) {
case ALWAYS_TRUE:
// If control can fall off the end of the body, jump back to the
@@ -1593,8 +1583,9 @@
node->continue_target()->Bind();
}
if (has_valid_frame()) {
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -1633,8 +1624,7 @@
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -1693,8 +1683,7 @@
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -1780,25 +1769,81 @@
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
- frame_->EmitPush(r0); // duplicate the object being enumerated
- frame_->EmitPush(r0);
+ // r0: value to be iterated over
+ frame_->EmitPush(r0); // Push the object being iterated over.
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ mov(r1, Operand(r0));
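+ // r1: current object; the loop below walks the prototype chain.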
+ loop.Bind();
+ // Check that there are no elements.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r3 for the subsequent
+ // prototype load.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
+ __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
+ __ cmp(r2, ip);
+ call_runtime.Branch(eq);
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ call_runtime.Branch(eq);
+ // For all objects but the receiver, check that the cache is empty.
+ // r4: empty fixed array root.
+ __ cmp(r1, r0);
+ check_prototype.Branch(eq);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ loop.Branch(ne);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
+ frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ // r0: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
__ mov(r2, Operand(r0));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
fixed_array.Branch(ne);
+ use_cache.Bind();
// Get enum cache
+ // r0: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
__ mov(r1, Operand(r0));
__ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
@@ -1863,9 +1908,7 @@
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_reg(r0);
- __ mov(r0, Operand(1));
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@@ -2272,7 +2315,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height);
@@ -2303,20 +2347,19 @@
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
- LoadAndSpill(node->then_expression(), typeof_state());
+ LoadAndSpill(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
- LoadAndSpill(node->else_expression(), typeof_state());
+ LoadAndSpill(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT(frame_->height() == original_height + 1);
@@ -2383,10 +2426,6 @@
frame_->EmitPush(r0);
} else {
- // Note: We would like to keep the assert below, but it fires because of
- // some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(!slot->var()->is_dynamic());
-
// Special handling for locals allocated in registers.
__ ldr(r0, SlotOperand(slot, r2));
frame_->EmitPush(r0);
@@ -2481,7 +2520,7 @@
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlot(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
}
@@ -2500,7 +2539,7 @@
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValueAndSpill(typeof_state());
+ ref.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -2836,7 +2875,7 @@
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2901,7 +2940,7 @@
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
- property.GetValueAndSpill(typeof_state());
+ property.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -3071,7 +3110,7 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
+ ref.GetValueAndSpill(); // receiver
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -3301,7 +3340,79 @@
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
+ Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
+
+ LoadAndSpill(args->at(0));
+ LoadAndSpill(args->at(1));
+ frame_->EmitPop(r0); // Index.
+ frame_->EmitPop(r1); // String.
+
+ Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
+
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // The 'string' was a Smi.
+
+ ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+ __ b(ne, &slow); // The index was negative or not a Smi.
+
+ __ bind(&try_again_with_new_string);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &slow);
+
+ // Now r2 has the string type.
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ // Now r3 has the length of the string. Compare with the index.
+ __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
+ __ b(le, &slow);
+
+ // Here we know the index is in range. Check that string is sequential.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ tst(r2, Operand(kStringRepresentationMask));
+ __ b(ne, &not_a_flat_string);
+
+ // Check whether it is an ASCII string.
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ tst(r2, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string. We can add without shifting since the Smi tag size is the
+ // log2 of the number of bytes in a two-byte character.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiShiftSize);
+ __ add(r1, r1, Operand(r0));
+ __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
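+ // Smi-tag the character code; kSmiTag is zero, so a shift suffices.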
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(&ascii_string);
+ __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
+ __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(&not_a_flat_string);
+ __ and_(r2, r2, Operand(kStringRepresentationMask));
+ __ cmp(r2, Operand(kConsStringTag));
+ __ b(ne, &slow);
+
+ // ConsString.
+ // Check that the right hand side is the empty string (i.e. if this is really a
+ // flat string in a cons string). If that is not the case we would rather go
+ // to the runtime system now, to flatten the string.
+ __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
+ __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
+ __ cmp(r2, Operand(r3));
+ __ b(ne, &slow);
+
+ // Get the first of the two strings.
+ __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+ __ bind(&end);
frame_->EmitPush(r0);
}
@@ -3325,6 +3436,51 @@
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r1);
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
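+ // typeof null is 'object', so null counts as an object here.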
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ true_target()->Branch(eq);
+
+ Register map_reg = r2;
+ __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ false_target()->Branch(eq);
+
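+ // Finally check that the instance type is in the JS object range.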
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ false_target()->Branch(lt);
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ cc_reg_ = le;
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
+ cc_reg_ = eq;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3403,6 +3559,17 @@
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
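+ // Leave the actual concatenation to the runtime.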
+ frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3476,7 +3643,6 @@
if (op == Token::NOT) {
LoadConditionAndSpill(node->expression(),
- NOT_INSIDE_TYPEOF,
false_target(),
true_target(),
true);
@@ -3490,9 +3656,7 @@
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@@ -3500,9 +3664,7 @@
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@@ -3514,9 +3676,7 @@
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@@ -3566,9 +3726,7 @@
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
continue_label.Jump();
smi_label.Bind();
@@ -3590,9 +3748,7 @@
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
break;
}
@@ -3637,7 +3793,7 @@
ASSERT(frame_->height() == original_height + 1);
return;
}
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
frame_->EmitPop(r0);
JumpTarget slow;
@@ -3677,9 +3833,7 @@
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@@ -3731,7 +3885,6 @@
if (op == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
&is_true,
false_target(),
false);
@@ -3767,7 +3920,6 @@
}
is_true.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3779,7 +3931,6 @@
} else if (op == Token::OR) {
JumpTarget is_false;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
true_target(),
&is_false,
false);
@@ -3815,7 +3966,6 @@
}
is_false.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -4000,28 +4150,35 @@
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
+ true_target()->Branch(eq);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r1, ip);
true_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
+ false_target()->Branch(eq);
+
// It can be an undetectable object.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
@@ -4062,9 +4219,7 @@
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
}
@@ -4114,7 +4269,7 @@
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
@@ -4129,16 +4284,11 @@
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof. If
- // there is a chance that reference errors can be thrown below, we
- // must distinguish between the two kinds of loads (typeof expression
- // loads must not throw a reference error).
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
@@ -4157,9 +4307,6 @@
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
-
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
VirtualFrame* frame = cgen_->frame();
@@ -4495,7 +4642,7 @@
// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
@@ -4568,6 +4715,22 @@
if (cc != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but (undefined <= undefined)
+ // == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(r2));
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
+ }
+ __ mov(pc, Operand(lr)); // Return.
+ }
}
}
__ bind(&return_equal);
@@ -4645,9 +4808,17 @@
// Rhs is a smi, lhs is a number.
__ push(lr);
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ } else {
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
+
// r3 and r2 are rhs as double.
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -4675,9 +4846,16 @@
__ push(lr);
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ } else {
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
__ pop(lr);
// Fall through to both_loaded_as_doubles.
}
@@ -4880,9 +5058,23 @@
// fall through if neither is a NaN. Also binds rhs_not_nan.
EmitNanCheck(masm, &rhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ __ fcmp(d6, d7);
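+ // fcmp sets the FPSCR condition flags; vmrs with pc as destination
+ // copies them into the CPSR so the conditional moves below can
+ // materialize the comparison result in r0.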
+ __ vmrs(pc);
+ __ mov(r0, Operand(0), LeaveCC, eq);
+ __ mov(r0, Operand(1), LeaveCC, lt);
+ __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+ } else {
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -4935,7 +5127,6 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@@ -4982,24 +5173,74 @@
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ jmp(&do_the_call); // Tail call. No return.
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
+
+ // Push the arguments to the stack.
__ push(r1);
__ push(r0);
- __ mov(r0, Operand(1)); // Set number of arguments.
+
+ if (Token::ADD == operation) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[1] : first argument
+
+ Label not_strings, not_string1, string1;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &not_string1);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_string1);
+
+ // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
+
+ // First and second argument are strings.
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &not_strings);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+ __ bind(&not_strings);
+ }
+
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
@@ -5027,12 +5268,20 @@
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
@@ -5052,12 +5301,19 @@
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
@@ -5066,6 +5322,38 @@
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// r5: Address of heap number for result.
+
+ if (CpuFeatures::IsSupported(VFP3) &&
+ ((Token::MUL == operation) ||
+ (Token::DIV == operation) ||
+ (Token::ADD == operation) ||
+ (Token::SUB == operation))) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ if (Token::MUL == operation) {
+ __ fmuld(d5, d6, d7);
+ } else if (Token::DIV == operation) {
+ __ fdivd(d5, d6, d7);
+ } else if (Token::ADD == operation) {
+ __ faddd(d5, d6, d7);
+ } else if (Token::SUB == operation) {
+ __ fsubd(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+
+ __ fmrrd(r0, r1, d5);
+
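+ // Write the double result into the heap number at r5 and return it.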
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
+ __ mov(pc, lr);
+ return;
+ }
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -5134,38 +5422,49 @@
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
-
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ __ rsb(dest, dest, Operand(30));
+ }
__ bind(&right_exponent);
- // Get the top bits of the mantissa.
- __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to take.
- // We just orred in the implicit bit so that took care of one and we want to
- // leave the sign bit 0 so we subtract 2 bits from the shift distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't actually
- // need this because the bits get shifted out again, but it's probably slower
- // to test than just to do it.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- __ mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions implementing double precision to integer
+ // conversion using round to zero.
+ __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ __ fmdrr(d7, scratch2, scratch);
+ __ ftosid(s15, d7);
+ __ fmrs(dest, s15);
+ } else {
+ // Get the top bits of the mantissa.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the last 10 bits.
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ }
__ bind(&done);
}
-
// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
@@ -5182,7 +5481,7 @@
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r1, r3, r4, r5, &slow);
+ GetInt32(masm, r1, r3, r5, r4, &slow);
__ jmp(&done_checking_r1);
__ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1));
@@ -5192,7 +5491,7 @@
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r0, r2, r4, r5, &slow);
+ GetInt32(masm, r0, r2, r5, r4, &slow);
__ jmp(&done_checking_r0);
__ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1));
@@ -5277,7 +5576,6 @@
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5659,7 +5957,6 @@
// Enter runtime system.
__ bind(&slow);
__ push(r0);
- __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(¬_smi);
@@ -5797,7 +6094,7 @@
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -5857,7 +6154,7 @@
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
// check if we should retry or throw exception
Label retry;
@@ -5903,12 +6200,12 @@
// this by performing a garbage collection and retrying the
// builtin once.
- StackFrame::Type frame_type = is_debug_break
- ? StackFrame::EXIT_DEBUG
- : StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -5923,7 +6220,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -5932,7 +6229,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -5943,7 +6240,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -6135,7 +6432,6 @@
// Slow-case. Tail call builtin.
__ bind(&slow);
- __ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
@@ -6178,7 +6474,7 @@
__ b(eq, &adaptor);
// Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
+ // through register r0. Use unsigned comparison to get negative
// check for free.
__ cmp(r1, r0);
__ b(cs, &slow);