Update v8 to bleeding_edge revision 3784
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index fd2fcd3..354436c 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -174,20 +174,6 @@
}
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 8b65b7c..208d583 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -398,8 +398,6 @@
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 38f08d1..7bee98d 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -121,12 +121,13 @@
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
+ Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -142,7 +143,9 @@
// r1: called JS function
// cp: callee's context
-void CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::Generate(FunctionLiteral* fun,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
@@ -168,8 +171,7 @@
// r1: called JS function
// cp: callee's context
allocator_->Initialize();
- frame_->Enter();
- // tos: code slot
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -178,104 +180,118 @@
}
#endif
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
+ if (mode == PRIMARY) {
+ frame_->Enter();
+ // tos: code slot
+
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
+ frame_->AllocateStackSlots();
+
+ VirtualFrame::SpilledScope spilled_scope;
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
+
+#ifdef DEBUG
+ JumpTarget verified_true;
+ __ cmp(r0, Operand(cp));
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
+#endif
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
+
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // No parameters in global scope.
+ ASSERT(!scope_->is_global_scope());
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(slot_offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ }
+ }
+
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in the
+ // context.
+ if (scope_->arguments() != NULL) {
+ Comment cmnt(masm_, "[ allocate arguments object");
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address, and the
+ // frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ frame_->Drop(); // Value is no longer needed.
+ }
+
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(ip);
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, r1, cp,
+ // fp, and lr have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(4);
+ allocator_->Unuse(r1);
+ allocator_->Unuse(lr);
+ }
+
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false;
- VirtualFrame::SpilledScope spilled_scope;
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
-
-#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, Operand(cp));
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
-#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
-
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope_->is_global_scope()); // no parameters in global scope
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
- }
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope_->arguments() != NULL) {
- Comment cmnt(masm_, "[ allocate arguments object");
- ASSERT(scope_->arguments_shadow() != NULL);
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
- frame_->Adjust(3);
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- frame_->Drop(); // Value is no longer needed.
- }
-
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- frame_->EmitPush(ip);
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
- }
-
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -2286,7 +2302,8 @@
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
#endif
// Ignore the return value.
ASSERT(frame_->height() == original_height);
@@ -2589,13 +2606,12 @@
// Load the global object.
LoadGlobal();
// Setup the name register.
- Result name(r2);
__ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
} else {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
}
// Drop the global object. The result is in r0.
@@ -3158,22 +3174,15 @@
}
// r0: the number of arguments.
- Result num_args(r0);
__ mov(r0, Operand(arg_count));
-
// Load the function into r1 as per calling convention.
- Result function(r1);
__ ldr(r1, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- frame_->CallCodeObject(ic,
- RelocInfo::CONSTRUCT_CALL,
- &num_args,
- &function,
- arg_count + 1);
+ frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
@@ -3723,6 +3732,9 @@
frame_->EmitPush(r0); // r0 has result
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
LoadAndSpill(node->expression());
frame_->EmitPop(r0);
switch (op) {
@@ -3733,9 +3745,6 @@
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
@@ -3748,10 +3757,10 @@
__ tst(r0, Operand(kSmiTagMask));
smi_label.Branch(eq);
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ frame_->CallStub(&stub, 0);
continue_label.Jump();
+
smi_label.Bind();
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
@@ -4330,13 +4339,12 @@
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- Result name_reg(r2);
__ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
break;
}
@@ -4377,6 +4385,7 @@
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -4386,18 +4395,12 @@
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<String> name(GetName());
- Result value(r0);
frame->EmitPop(r0);
-
// Setup the name register.
- Result property_name(r2);
__ mov(r2, Operand(name));
- frame->CallCodeObject(ic,
- RelocInfo::CODE_TARGET,
- &value,
- &property_name,
- 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
@@ -4410,17 +4413,16 @@
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- Result value(r0);
frame->EmitPop(r0); // value
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
default:
UNREACHABLE();
}
- cgen_->UnloadReference(this);
}
@@ -6102,59 +6104,96 @@
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
+ Label slow, done;
- Label undo;
- Label slow;
- Label not_smi;
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
- // Enter runtime system if the value is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, ¬_smi);
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &slow);
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ cmp(r0, Operand(0));
- __ b(eq, &slow);
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r1, r0, Operand(0), SetCC);
+ __ b(vs, &slow);
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r1, r0, Operand(0), SetCC);
- __ b(vs, &slow);
+ __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ b(&done);
- __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ bind(&try_float);
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ AllocateHeapNumber(masm, &slow, r1, r2, r3);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+
+ // Convert the heap number in r0 to an untagged integer in r1.
+ GetInt32(masm, r0, r1, r2, r3, &slow);
+
+ // Do the bitwise operation (move negated) and check if the result
+ // fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(&done);
+
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite r0 until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in r0.
+ AllocateHeapNumber(masm, &slow, r2, r3, r4);
+ __ mov(r0, Operand(r2));
+ }
+
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ } else {
+ UNIMPLEMENTED();
+ }
+
+ __ bind(&done);
__ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ push(r0);
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
-
- __ bind(¬_smi);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- AllocateHeapNumber(masm, &slow, r1, r2, r3);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
}
- __ StubReturn(1);
-}
-
-
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
- // Result returned in r0 or r0+r1 by default.
- return 0;
}
@@ -6265,7 +6304,6 @@
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -6325,7 +6363,7 @@
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(mode);
+ __ LeaveExitFrame(mode_);
// check if we should retry or throw exception
Label retry;
@@ -6358,7 +6396,7 @@
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
// r0: number of arguments including receiver
// r1: pointer to builtin function
@@ -6366,17 +6404,15 @@
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ // Result returned in r0 or r0+r1 by default.
+
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
// builtin once.
- ExitFrame::Mode mode = is_debug_break
- ? ExitFrame::MODE_DEBUG
- : ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode);
+ __ EnterExitFrame(mode_);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -6391,7 +6427,6 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -6400,7 +6435,6 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -6411,7 +6445,6 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
@@ -6445,8 +6478,7 @@
// r1: function
// r2: receiver
// r3: argc
- __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
- __ ldr(r4, MemOperand(r4)); // argv
+ __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
// Push a frame with special values setup to mark it as an entry frame.
// r0: code entry
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index ccca2e9..0384485 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -32,6 +32,7 @@
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -149,11 +150,21 @@
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+ // compiler and needs to setup everything or the compiler is used as
+ // the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -201,8 +212,7 @@
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -239,7 +249,7 @@
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// The following are used by class Reference.
void LoadReference(Reference* ref);
@@ -443,6 +453,7 @@
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
+ friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index fc9808d..6eb5239 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -98,7 +98,7 @@
__ mov(r0, Operand(0)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
new file mode 100644
index 0000000..1aeea7a
--- /dev/null
+++ b/src/arm/fast-codegen-arm.cc
@@ -0,0 +1,140 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to return address and saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ ldr(reg, MemOperand(sp, index * kPointerSize));
+}
+
+
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
+ }
+
+ EmitLoadReceiver(r1);
+ __ BranchOnSmi(r1, bailout());
+
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ mov(ip, Operand(map));
+ __ cmp(r3, ip);
+ __ b(ne, bailout());
+}
+
+
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are cp (context) and possibly r1 (this). Both are also saved
+ // in the stack and cp is preserved by the call.
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ __ mov(r2, Operand(name));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(r1);
+ }
+}
+
+
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
+
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
+
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(r2, r1); // Copy receiver for write barrier.
+ } else {
+ offset += FixedArray::kHeaderSize;
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ }
+ // Perform the store.
+ __ str(r0, FieldMemOperand(r2, offset));
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, ip);
+}
+
+
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
+
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ // Note that we keep a live register reference to cp (context) at
+ // this point.
+
+ // Receiver (this) is allocated to r1 if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
+
+ VisitStatements(fun->body());
+
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
+
+ __ bind(&bailout_);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 8d1cfeb..9f240dd 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -52,80 +52,90 @@
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
function_ = fun;
SetFunctionPosition(fun);
- int locals_count = fun->scope()->num_stack_slots();
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop below.
+ if (mode == PRIMARY) {
+ int locals_count = fun->scope()->num_stack_slots();
+
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
}
- }
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context
- __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
}
}
- }
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, r1, r0);
+ }
+ }
}
- // Receiver is just before the parameters on the caller's stack.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, r3, r1, r2);
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
+ }
}
// Check the stack for overflow or break request.
@@ -133,15 +143,15 @@
// added to the implicit 8 byte offset that always applies to operations
// with pc and gives a return address 12 bytes down.
{ Comment cmnt(masm_, "[ Stack check");
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -581,7 +591,8 @@
int offset = Context::SlotOffset(slot->index());
__ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, result_register());
+ __ mov(r1, Operand(cp));
+ __ RecordWrite(r1, r2, result_register());
}
break;
@@ -1372,6 +1383,46 @@
break;
}
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, r0);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ b(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ mvn(result_register(), Operand(result_register()));
+ // Bit-clear inverted smi-tag.
+ __ bic(result_register(), result_register(), Operand(kSmiTagMask));
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
default:
UNREACHABLE();
}
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index b59c3f0..bae1e96 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -170,7 +170,6 @@
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
__ ldr(r0, MemOperand(sp, 0));
@@ -204,7 +203,6 @@
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
// Load receiver.
@@ -318,7 +316,6 @@
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack into r1.
@@ -451,7 +448,6 @@
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ ldr(r0, MemOperand(sp, 0));
@@ -543,6 +539,8 @@
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
@@ -555,6 +553,7 @@
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
Label slow, fast;
// Get the key and receiver object from the stack.
@@ -622,6 +621,8 @@
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
GenerateGeneric(masm);
}
@@ -640,6 +641,7 @@
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
@@ -654,7 +656,9 @@
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
Label slow, fast, array, extra, exit;
+
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
@@ -806,7 +810,7 @@
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
@@ -827,7 +831,7 @@
}
-void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r2 : name
@@ -839,7 +843,7 @@
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6c3bbbb..b39404e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -205,6 +205,11 @@
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
@@ -272,6 +277,14 @@
str(scratch, MemOperand(object));
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
+ }
}
@@ -1035,9 +1048,13 @@
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ mov(r1, Operand(ExternalReference(f)));
+ CEntryStub stub(1);
CallStub(&stub);
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 687fb1e..d19a683 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -362,6 +362,369 @@
}
+static void GenerateCallFunction(MacroAssembler* masm,
+ Object* object,
+ const ParameterCount& arguments,
+ Label* miss) {
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- r1: function to call
+ // -----------------------------------
+
+ // Check that the function really is a function.
+ __ BranchOnSmi(r1, miss);
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
+ }
+
+ // Invoke the function.
+ __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallConstFunction(MacroAssembler* masm,
+ JSFunction* function,
+ const ParameterCount& arguments) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ __ mov(r1, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
+
+ Register scratch = receiver;
+ __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ push(scratch);
+ __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+ __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+ // So far the most popular follow ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them, other cases may be added
+ // later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj,
+ holder,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm,
+ r0,
+ holder,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ push(holder);
+ __ Move(holder, Handle<AccessorInfo>(callback));
+ __ push(holder);
+ __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
+ __ push(scratch1);
+ __ push(name_);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5, 1);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5, 1);
+ }
+
+ private:
+ Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+ : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+ // So far the most popular case for failed interceptor is
+ // CONSTANT_FUNCTION sitting below.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // JSArray holder is a special case for call constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Constant functions cannot sit on global object.
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ASSERT(!r0.is(name_));
+ ASSERT(!r0.is(scratch1));
+ __ pop(name_); // Restore the name.
+ __ pop(scratch1); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch2);
+ Label invoke;
+ __ b(ne, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, scratch1,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ GenerateCallConstFunction(masm, function, arguments_);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+ Register name_;
+};
+
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -491,30 +854,18 @@
Register scratch2,
String* name,
Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
-
- // Push the arguments on the JS stack of the caller.
- __ push(receiver); // receiver
- __ push(reg); // holder
- __ push(name_reg); // name
-
- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
- __ mov(scratch1, Operand(Handle<Object>(interceptor)));
- __ push(scratch1);
- __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
- __ push(scratch2);
-
- // Do tail-call to the runtime system.
- ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(load_ic_property, 5, 1);
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
}
@@ -572,22 +923,7 @@
CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- // Check that the function really is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
- // Get the map.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Invoke the function.
- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -715,16 +1051,7 @@
UNREACHABLE();
}
- // Get the function and setup the context.
- __ mov(r1, Operand(Handle<JSFunction>(function)));
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ GenerateCallConstFunction(masm(), function, arguments());
// Handle call cache miss.
__ bind(&miss);
@@ -748,7 +1075,34 @@
// -----------------------------------
Label miss;
- // TODO(1224669): Implement.
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack into r0.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ // Load the name from the stack into r1.
+ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ CallInterceptorCompiler compiler(arguments(), r1);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ r0,
+ r2,
+ r3,
+ &miss);
+
+ // Restore receiver.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -921,7 +1275,6 @@
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -973,7 +1326,6 @@
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1099,7 +1451,7 @@
__ ldr(r0, MemOperand(sp, 0));
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,
holder,
&lookup,
@@ -1265,7 +1617,7 @@
__ b(ne, &miss);
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
&lookup,
diff --git a/src/arm/stub-cache-arm.cc.rej b/src/arm/stub-cache-arm.cc.rej
new file mode 100644
index 0000000..f8baa41
--- /dev/null
+++ b/src/arm/stub-cache-arm.cc.rej
@@ -0,0 +1,153 @@
+*************** void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+*** 491,520 ****
+ Register scratch2,
+ String* name,
+ Label* miss) {
+- // Check that the receiver isn't a smi.
+- __ tst(receiver, Operand(kSmiTagMask));
+- __ b(eq, miss);
+-
+- // Check that the maps haven't changed.
+- Register reg =
+- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+-
+- // Push the arguments on the JS stack of the caller.
+- __ push(receiver); // receiver
+- __ push(reg); // holder
+- __ push(name_reg); // name
+-
+- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+- ASSERT(!Heap::InNewSpace(interceptor));
+- __ mov(scratch1, Operand(Handle<Object>(interceptor)));
+- __ push(scratch1);
+- __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
+- __ push(scratch2);
+-
+- // Do tail-call to the runtime system.
+- ExternalReference load_ic_property =
+- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+- __ TailCallRuntime(load_ic_property, 5, 1);
+ }
+
+
+--- 854,871 ----
+ Register scratch2,
+ String* name,
+ Label* miss) {
++ LoadInterceptorCompiler compiler(name_reg);
++ CompileLoadInterceptor(&compiler,
++ this,
++ masm(),
++ object,
++ holder,
++ name,
++ lookup,
++ receiver,
++ scratch1,
++ scratch2,
++ miss);
+ }
+
+
+*************** Object* CallStubCompiler::CompileCallField(Object* object,
+*** 572,593 ****
+ CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+ GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+
+- // Check that the function really is a function.
+- __ tst(r1, Operand(kSmiTagMask));
+- __ b(eq, &miss);
+- // Get the map.
+- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+- __ b(ne, &miss);
+-
+- // Patch the receiver on the stack with the global proxy if
+- // necessary.
+- if (object->IsGlobalObject()) {
+- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+- __ str(r3, MemOperand(sp, argc * kPointerSize));
+- }
+-
+- // Invoke the function.
+- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+--- 923,929 ----
+ CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+ GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+
++ GenerateCallFunction(masm(), object, arguments(), &miss);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+*************** Object* CallStubCompiler::CompileCallConstant(Object* object,
+*** 715,730 ****
+ UNREACHABLE();
+ }
+
+- // Get the function and setup the context.
+- __ mov(r1, Operand(Handle<JSFunction>(function)));
+- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+-
+- // Jump to the cached code (tail call).
+- ASSERT(function->is_compiled());
+- Handle<Code> code(function->code());
+- ParameterCount expected(function->shared()->formal_parameter_count());
+- __ InvokeCode(code, expected, arguments(),
+- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+--- 1051,1057 ----
+ UNREACHABLE();
+ }
+
++ GenerateCallConstFunction(masm(), function, arguments());
+
+ // Handle call cache miss.
+ __ bind(&miss);
+*************** Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+*** 748,754 ****
+ // -----------------------------------
+ Label miss;
+
+- // TODO(1224669): Implement.
+
+ // Handle call cache miss.
+ __ bind(&miss);
+--- 1075,1108 ----
+ // -----------------------------------
+ Label miss;
+
++ // Get the number of arguments.
++ const int argc = arguments().immediate();
++
++ LookupResult lookup;
++ LookupPostInterceptor(holder, name, &lookup);
++
++ // Get the receiver from the stack into r0.
++ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
++ // Load the name from the stack into r1.
++ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
++
++ CallInterceptorCompiler compiler(arguments(), r1);
++ CompileLoadInterceptor(&compiler,
++ this,
++ masm(),
++ JSObject::cast(object),
++ holder,
++ name,
++ &lookup,
++ r0,
++ r2,
++ r3,
++ &miss);
++
++ // Restore receiver.
++ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
++
++ GenerateCallFunction(masm(), object, arguments(), &miss);
+
+ // Handle call cache miss.
+ __ bind(&miss);
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index a33ebd4..7a8ac72 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -219,36 +219,15 @@
}
-void VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->Unuse();
- RawCallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
- arg0->Unuse();
- arg1->Unuse();
- RawCallStub(stub);
-}
-
-
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
@@ -257,102 +236,34 @@
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
__ InvokeBuiltin(id, flags);
}
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
- int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
- spilled_args = dropped_args + 1;
- break;
case Code::FUNCTION:
- spilled_args = dropped_args + 1;
break;
case Code::KEYED_LOAD_IC:
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // The other types of code objects are called with values
- // in specific registers, and are handled in functions with
- // a different signature.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
- int spilled_args = 0;
- switch (code->kind()) {
case Code::LOAD_IC:
- ASSERT(arg->reg().is(r2));
- ASSERT(dropped_args == 0);
- spilled_args = 1;
- break;
case Code::KEYED_STORE_IC:
- ASSERT(arg->reg().is(r0));
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // No other types of code objects are called with values
- // in exactly one register.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- arg->Unuse();
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args) {
- int spilled_args = 1;
- switch (code->kind()) {
case Code::STORE_IC:
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r2));
ASSERT(dropped_args == 0);
- spilled_args = 1;
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r1));
- spilled_args = dropped_args + 1;
break;
default:
- // No other types of code objects are called with values
- // in exactly two registers.
UNREACHABLE();
break;
}
- PrepareForCall(spilled_args, dropped_args);
- arg0->Unuse();
- arg1->Unuse();
- RawCallCodeObject(code, rmode);
+ Forget(dropped_args);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
}
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index b2f0eea..9a2f7d3 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -287,18 +287,11 @@
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
+ Forget(arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ masm()->CallStub(stub);
}
- // Call stub that expects its argument in r0. The argument is given
- // as a result which must be the register r0.
- void CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that expects its arguments in r1 and r0. The arguments
- // are given as results which must be the appropriate registers.
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
@@ -311,20 +304,11 @@
int arg_count);
// Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
+ // from the stack. Register arguments to the IC stub are implicit,
+ // and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
@@ -511,14 +495,6 @@
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.