Update V8 to r4588
We're using WebKit r58033, as used by
http://src.chromium.org/svn/releases/5.0.387.0/DEPS
This requires http://v8.googlecode.com/svn/trunk@4465 but this version has a
crashing bug for ARM. Instead we use http://v8.googlecode.com/svn/trunk@4588,
which is used by http://src.chromium.org/svn/releases/6.0.399.0/DEPS
Note that a trivial bug fix was required in arm/codegen-arm.cc. This is guarded
with ANDROID. See http://code.google.com/p/v8/issues/detail?id=703
Change-Id: I459647a8286c4f8c7405f0c5581ecbf051a6f1e8
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 4f5ae3e..cc730f2 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -522,7 +522,9 @@
int32_t pc_offset() const { return pc_ - buffer_; }
int32_t current_position() const { return current_position_; }
- int32_t current_statement_position() const { return current_position_; }
+ int32_t current_statement_position() const {
+ return current_statement_position_;
+ }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 3bd42ed..04bcfeb 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -74,7 +74,99 @@
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
- UNIMPLEMENTED_MIPS();
+ // Called from JSEntryStub::GenerateBody
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+  //   a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // arguments slots
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ // Clear the context before we push it when entering the JS frame.
+ __ li(cp, Operand(0));
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Set up the context from the function argument.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Set up the roots register.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ li(s6, Operand(roots_address));
+
+ // Push the function and the receiver onto the stack.
+ __ MultiPushReversed(a1.bit() | a2.bit());
+
+ // Copy arguments to the stack in a loop.
+ // a3: argc
+ // s0: argv, ie points to first arg
+ Label loop, entry;
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ add(t2, s0, t0);
+ __ b(&entry);
+ __ nop(); // Branch delay slot nop.
+ // t2 points past last arg.
+ __ bind(&loop);
+ __ lw(t0, MemOperand(s0)); // Read next parameter.
+ __ addiu(s0, s0, kPointerSize);
+ __ lw(t0, MemOperand(t0)); // Dereference handle.
+ __ Push(t0); // Push parameter.
+ __ bind(&entry);
+ __ Branch(ne, &loop, s0, Operand(t2));
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+  //   a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ // s6: roots_address
+ //
+ // Stack:
+ // arguments
+ // receiver
+ // function
+ // arguments slots
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t4);
+ __ mov(s2, t4);
+ __ mov(s3, t4);
+  __ mov(s4, t4);
+ __ mov(s5, t4);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code and pass argc as a0.
+ __ mov(a0, a3);
+ if (is_construct) {
+ UNIMPLEMENTED_MIPS();
+ __ break_(0x164);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ }
+
+ __ LeaveInternalFrame();
+
+ __ Jump(ra);
}
@@ -100,6 +192,7 @@
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
+ __ break_(0x201);
}
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
index 2a77715..3a511b8 100644
--- a/src/mips/codegen-mips-inl.h
+++ b/src/mips/codegen-mips-inl.h
@@ -36,15 +36,29 @@
// Platform-specific inline functions.
-void DeferredCode::Jump() { __ b(&entry_label_); }
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
+void DeferredCode::Jump() {
+ __ b(&entry_label_);
+ __ nop();
}
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
+void Reference::GetValueAndSpill() {
+ GetValue();
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ VisitStatements(statements);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ Load(expression);
}
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 2de45f6..ca1edd4 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -30,12 +30,14 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
+#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
-#include "compiler.h"
+#include "virtual-frame-inl.h"
@@ -46,7 +48,7 @@
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
@@ -60,13 +62,41 @@
}
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+ : owner_(owner),
+ true_target_(NULL),
+ false_target_(NULL),
+ previous_(NULL) {
+ owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target)
+ : owner_(owner),
+ true_target_(true_target),
+ false_target_(false_target),
+ previous_(owner->state()) {
+ owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+ ASSERT(owner_->state() == this);
+ owner_->set_state(previous_);
+}
+
+
+// -----------------------------------------------------------------------------
// CodeGenerator implementation
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
- scope_(NULL),
frame_(NULL),
allocator_(NULL),
cc_reg_(cc_always),
@@ -76,18 +106,362 @@
// Calling conventions:
-// s8_fp: caller's frame pointer
+// fp: caller's frame pointer
// sp: stack pointer
// a1: called JS function
// cp: callee's context
-void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
- UNIMPLEMENTED_MIPS();
+void CodeGenerator::Generate(CompilationInfo* info) {
+ // Record the position for debugging purposes.
+ CodeForFunctionPosition(info->function());
+
+ // Initialize state.
+ info_ = info;
+ ASSERT(allocator_ == NULL);
+ RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+ ASSERT(frame_ == NULL);
+ frame_ = new VirtualFrame();
+ cc_reg_ = cc_always;
+
+ {
+ CodeGenState state(this);
+
+ // Registers:
+ // a1: called JS function
+ // ra: return address
+ // fp: caller's frame pointer
+ // sp: stack pointer
+ // cp: callee's context
+ //
+ // Stack:
+ // arguments
+ // receiver
+
+ frame_->Enter();
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+
+ // Initialize the function return target.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
+ VirtualFrame::SpilledScope spilled_scope;
+ if (scope()->num_heap_slots() > 0) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope()->num_parameters(); i++) {
+ UNIMPLEMENTED_MIPS();
+ }
+ }
+
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in the
+ // context.
+ if (scope()->arguments() != NULL) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Generate code to 'execute' declarations and initialize functions
+ // (source elements). In case of an illegal redeclaration we need to
+ // handle that instead of processing the declarations.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ illegal redeclarations");
+ scope()->VisitIllegalRedeclaration(this);
+ } else {
+ Comment cmnt(masm_, "[ declarations");
+ ProcessDeclarations(scope()->declarations());
+ // Bail out if a stack-overflow exception occurred when processing
+ // declarations.
+ if (HasStackOverflow()) return;
+ }
+
+ if (FLAG_trace) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Compile the body of the function in a vanilla state. Don't
+ // bother compiling all the code if the scope has an illegal
+ // redeclaration.
+ if (!scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+ bool is_builtin = Bootstrapper::IsActive();
+ bool should_trace =
+ is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+ if (should_trace) {
+ UNIMPLEMENTED_MIPS();
+ }
+#endif
+ VisitStatementsAndSpill(info->function()->body());
+ }
+ }
+
+ if (has_valid_frame() || function_return_.is_linked()) {
+ if (!function_return_.is_linked()) {
+ CodeForReturnPosition(info->function());
+ }
+ // Registers:
+ // v0: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // cp: callee's context
+
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+
+ function_return_.Bind();
+ if (FLAG_trace) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ masm_->mov(sp, fp);
+ masm_->lw(fp, MemOperand(sp, 0));
+ masm_->lw(ra, MemOperand(sp, 4));
+ masm_->addiu(sp, sp, 8);
+
+ // Here we use masm_-> instead of the __ macro to avoid the code coverage
+ // tool from instrumenting as we rely on the code size here.
+ // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
+ masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
+ masm_->Jump(ra);
+ // The Jump automatically generates a nop in the branch delay slot.
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(kJSReturnSequenceLength,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+ }
+
+ // Code generation state must be reset.
+ ASSERT(!has_cc());
+ ASSERT(state_ == NULL);
+ ASSERT(!function_return_is_shadowed_);
+ function_return_.Unuse();
+ DeleteFrame();
+
+ // Process any deferred code using the register allocator.
+ if (!HasStackOverflow()) {
+ ProcessDeferred();
+ }
+
+ allocator_ = NULL;
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ UNIMPLEMENTED_MIPS();
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ int size = ref->size();
+ if (size > 0) {
+ frame_->EmitPop(a0);
+ frame_->Drop(size);
+ frame_->EmitPush(a0);
+ }
+ ref->set_unloaded();
+}
+
+
+MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(no_reg, 0);
+
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+
+ case Slot::CONTEXT: {
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(no_reg, 0);
+ }
+
+ default:
+ UNREACHABLE();
+ return MemOperand(no_reg, 0);
+ }
+}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition
+// code register. If force_cc is set, the value is forced to set the
+// condition code register and no value is pushed. If the condition code
+// register was set, has_cc() is true and cc_reg_ contains the condition to
+// test for 'true'.
+void CodeGenerator::LoadCondition(Expression* x,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_cc) {
+ ASSERT(!has_cc());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, true_target, false_target);
+ Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+ // state (eg, a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ has_valid_frame() &&
+ !has_cc() &&
+ frame_->height() == original_height) {
+ true_target->Jump();
+ }
+ }
+ if (force_cc && frame_ != NULL && !has_cc()) {
+ // Convert the TOS value to a boolean in the condition code register.
+ UNIMPLEMENTED_MIPS();
+ }
+ ASSERT(!force_cc || !has_valid_frame() || has_cc());
+ ASSERT(!has_valid_frame() ||
+ (has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::Load(Expression* x) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ JumpTarget true_target;
+ JumpTarget false_target;
+ LoadCondition(x, &true_target, &false_target, false);
+
+ if (has_cc()) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ if (true_target.is_linked() || false_target.is_linked()) {
+ UNIMPLEMENTED_MIPS();
+ }
+ ASSERT(has_valid_frame());
+ ASSERT(!has_cc());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+ VirtualFrame::SpilledScope spilled_scope;
+ __ lw(a0, GlobalObject());
+ frame_->EmitPush(a0);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ VirtualFrame::SpilledScope spilled_scope;
+ if (slot->type() == Slot::LOOKUP) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ __ lw(a0, SlotOperand(slot, a2));
+ frame_->EmitPush(a0);
+ if (slot->var()->mode() == Variable::CONST) {
+ UNIMPLEMENTED_MIPS();
+ }
+ }
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // We must execute the store. Storing a variable must keep the
+ // (new) value on the stack. This is necessary for compiling
+ // assignment expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end up
+ // calling this code. a2 may be loaded with context; used below in
+ // RecordWrite.
+ frame_->EmitPop(a0);
+ __ sw(a0, SlotOperand(slot, a2));
+ frame_->EmitPush(a0);
+ if (slot->type() == Slot::CONTEXT) {
+ UNIMPLEMENTED_MIPS();
+ }
+ // If we definitely did not jump over the assignment, we do not need
+ // to bind the exit label. Doing so can defeat peephole
+ // optimization.
+ if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+ exit.Bind();
+ }
+ }
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- UNIMPLEMENTED_MIPS();
+ VirtualFrame::SpilledScope spilled_scope;
+ for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+ VisitAndSpill(statements->at(i));
+ }
}
@@ -97,7 +471,14 @@
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- UNIMPLEMENTED_MIPS();
+ VirtualFrame::SpilledScope spilled_scope;
+ frame_->EmitPush(cp);
+ __ li(t0, Operand(pairs));
+ frame_->EmitPush(t0);
+ __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ frame_->EmitPush(t0);
+ frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // The result is discarded.
}
@@ -107,7 +488,17 @@
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ CodeForStatementPosition(node);
+ Expression* expression = node->expression();
+ expression->MarkAsStatement();
+ LoadAndSpill(expression);
+ frame_->Drop();
+ ASSERT(frame_->height() == original_height);
}
@@ -132,7 +523,22 @@
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- UNIMPLEMENTED_MIPS();
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ReturnStatement");
+
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+ if (function_return_is_shadowed_) {
+ frame_->EmitPop(v0);
+ function_return_.Jump();
+ } else {
+ // Pop the result from the frame and prepare the frame for
+ // returning thus making it easier to merge.
+ frame_->EmitPop(v0);
+ frame_->PrepareForReturn();
+
+ function_return_.Jump();
+ }
}
@@ -191,8 +597,8 @@
}
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
- FunctionBoilerplateLiteral* node) {
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral* node) {
UNIMPLEMENTED_MIPS();
}
@@ -203,17 +609,45 @@
void CodeGenerator::VisitSlot(Slot* node) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlot(node, typeof_state());
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ VariableProxy");
+
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
+ if (expr != NULL) {
+ Visit(expr);
+ } else {
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValueAndSpill();
+ }
+ ASSERT(frame_->height() == original_height + 1);
}
void CodeGenerator::VisitLiteral(Literal* node) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Literal");
+ __ li(t0, Operand(node->handle()));
+ frame_->EmitPush(t0);
+ ASSERT(frame_->height() == original_height + 1);
}
@@ -238,7 +672,47 @@
void CodeGenerator::VisitAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Assignment");
+
+ { Reference target(this, node->target());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ frame_->EmitPush(zero_reg);
+ ASSERT(frame_->height() == original_height + 1);
+ return;
+ }
+
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ LoadAndSpill(node->value());
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL &&
+ (var->mode() == Variable::CONST) &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+ } else {
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
+ }
+ }
+ ASSERT(frame_->height() == original_height + 1);
}
@@ -253,7 +727,73 @@
void CodeGenerator::VisitCall(Call* node) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Call");
+
+ Expression* function = node->expression();
+ ZoneList<Expression*>* args = node->arguments();
+
+ // Standard function call.
+ // Check if the function is a variable or a property.
+ Variable* var = function->AsVariableProxy()->AsVariable();
+ Property* property = function->AsProperty();
+
+ // ------------------------------------------------------------------------
+ // Fast-case: Use inline caching.
+ // ---
+ // According to ECMA-262, section 11.2.3, page 44, the function to call
+ // must be resolved after the arguments have been evaluated. The IC code
+ // automatically handles this by loading the arguments before the function
+ // is resolved in cache misses (this also holds for megamorphic calls).
+ // ------------------------------------------------------------------------
+
+ if (var != NULL && var->is_possibly_eval()) {
+ UNIMPLEMENTED_MIPS();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is global
+ // ----------------------------------
+
+ int arg_count = args->length();
+
+ // We need sp to be 8 bytes aligned when calling the stub.
+ __ SetupAlignedCall(t0, arg_count);
+
+ // Pass the global object as the receiver and let the IC stub
+ // patch the stack to use the global proxy as 'this' in the
+ // invoked function.
+ LoadGlobal();
+
+ // Load the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Setup the receiver register and call the IC initialization code.
+ __ li(a2, Operand(var->name()));
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+ CodeForSourcePosition(node->position());
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count + 1);
+ __ ReturnFromAlignedCall();
+ __ lw(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->EmitPush(v0);
+
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ UNIMPLEMENTED_MIPS();
+ } else if (property != NULL) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ ASSERT(frame_->height() == original_height + 1);
}
@@ -292,6 +832,26 @@
}
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
@@ -300,6 +860,11 @@
}
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@@ -320,12 +885,12 @@
}
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@@ -413,8 +978,108 @@
#undef __
#define __ ACCESS_MASM(masm)
+// -----------------------------------------------------------------------------
+// Reference support
-// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
+ cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+ ASSERT(is_unloaded() || is_illegal());
+}
+
+
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
+ }
+}
+
+
+void Reference::GetValue() {
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
+ switch (type_) {
+ case SLOT: {
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+
+ case NAMED: {
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+
+ case KEYED: {
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
+ break;
+ }
+
+ case NAMED: {
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+
+ case KEYED: {
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
// positive or negative to indicate the result of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
@@ -422,6 +1087,12 @@
}
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ UNIMPLEMENTED_MIPS();
+ return Handle<Code>::null();
+}
+
+
void StackCheckStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x790);
@@ -446,27 +1117,274 @@
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x826);
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to the first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ if (do_gc) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Call C built-in.
+ // a0 = argc, a1 = argv
+ __ mov(a0, s0);
+ __ mov(a1, s1);
+
+ __ CallBuiltin(s2);
+
+ if (always_allocate) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ __ addiu(a2, v0, 1);
+ __ andi(t0, a2, kFailureTagMask);
+ __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
+
+ // Exit C frame and return.
+ // v0:v1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ __ LeaveExitFrame(mode_);
+
+ // Check if we should retry or throw exception.
+ Label retry;
+ __ bind(&failure_returned);
+ ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+ __ Branch(eq, &retry, t0, Operand(zero_reg));
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ Branch(eq, throw_out_of_memory_exception,
+ v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+ // Retrieve the pending exception and clear the variable.
+ __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
+ __ lw(a3, MemOperand(t0));
+ __ LoadExternalReference(t0,
+ ExternalReference(Top::k_pending_exception_address));
+ __ lw(v0, MemOperand(t0));
+ __ sw(a3, MemOperand(t0));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ Branch(eq, throw_termination_exception,
+ v0, Operand(Factory::termination_exception()));
+
+ // Handle normal exception.
+ __ b(throw_normal_exception);
+ __ nop(); // Branch delay slot nop.
+
+  __ bind(&retry);  // pass last failure (v0) as parameter (v0) when retrying
}
void CEntryStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x831);
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(mode_, s0, s1, s2);
+
+ // s0: number of arguments (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
}
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- UNIMPLEMENTED_MIPS();
- // Load a result.
- __ li(v0, Operand(0x1234));
- __ jr(ra);
- // Return
- __ nop();
+ Label invoke, exit;
+
+ // Registers:
+ // a0: entry address
+ // a1: function
+ // a2: receiver
+ // a3: argc
+ //
+ // Stack:
+ // 4 args slots
+ // args
+
+ // Save callee saved registers on the stack.
+ __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+ // We build an EntryFrame.
+ __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ li(t2, Operand(Smi::FromInt(marker)));
+ __ li(t1, Operand(Smi::FromInt(marker)));
+ __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ __ lw(t0, MemOperand(t0));
+ __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+
+ // Setup frame pointer for the frame to be pushed.
+ __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Load argv in s0 register.
+ __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
+ StandardFrameConstants::kCArgsSlotsSize));
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xff...f) |
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ // Call a faked try-block that does the invoke.
+ __ bal(&invoke);
+ __ nop(); // Branch delay slot nop.
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ LoadExternalReference(t0,
+ ExternalReference(Top::k_pending_exception_address));
+ __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
+ __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit);
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bal(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
+ __ lw(t1, MemOperand(t0));
+ __ LoadExternalReference(t0,
+ ExternalReference(Top::k_pending_exception_address));
+ __ sw(t1, MemOperand(t0));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ LoadExternalReference(t0, construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ LoadExternalReference(t0, entry);
+ }
+ __ lw(t9, MemOperand(t0)); // deref address
+
+ // Call JSEntryTrampoline.
+ __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ __ CallBuiltin(t9);
+
+ // Unlink this frame from the handler chain. When reading the
+ // address of the next handler, there is no need to use the address
+ // displacement since the current stack pointer (sp) points directly
+ // to the stack handler.
+ __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
+ __ sw(t1, MemOperand(t0));
+
+ // This restores sp to its position before PushTryHandler.
+ __ addiu(sp, sp, StackHandlerConstants::kSize);
+
+ __ bind(&exit); // v0 holds result
+ // Restore the top frame descriptors from the stack.
+ __ Pop(t1);
+ __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ __ sw(t1, MemOperand(t0));
+
+ // Reset the stack to the callee saved registers.
+ __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+ // Return.
+ __ Jump(ra);
}
// This stub performs an instanceof, calling the builtin function if
-// necessary. Uses a1 for the object, a0 for the function that it may
+// necessary. Uses a1 for the object, a0 for the function that it may
// be an instance of (these are fetched from the stack).
void InstanceofStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 147b872..0f0a746 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -42,7 +42,77 @@
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+class Reference BASE_EMBEDDED {
+ public:
+ // The values of the types is important, see size().
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
+ ~Reference();
+
+ Expression* expression() const { return expression_; }
+ Type type() const { return type_; }
+ void set_type(Type value) {
+ ASSERT_EQ(ILLEGAL, type_);
+ type_ = value;
+ }
+
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
+ // The size the reference takes up on the stack.
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
+
+ bool is_illegal() const { return type_ == ILLEGAL; }
+ bool is_slot() const { return type_ == SLOT; }
+ bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
+
+ // Return the name. Only valid for named property references.
+ Handle<String> GetName();
+
+ // Generate code to push the value of the reference on top of the
+ // expression stack. The reference is expected to be already on top of
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
+ void GetValue();
+
+ // Generate code to pop a reference, push the value of the reference,
+ // and then spill the stack frame.
+ inline void GetValueAndSpill();
+
+ // Generate code to store the value on top of the expression stack in the
+ // reference. The reference is expected to be immediately below the value
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
+ void SetValue(InitState init_state);
+
+ private:
+ CodeGenerator* cgen_;
+ Expression* expression_;
+ Type type_;
+ // Keep the reference on the stack after get, so it can be used by set later.
+ bool persist_after_get_;
+};
+
+
+// -----------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
@@ -89,7 +159,7 @@
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
@@ -152,16 +222,19 @@
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 6;
+ static const int kJSReturnSequenceLength = 7;
+
+ // If the name is an inline runtime function call return the number of
+ // expected arguments. Otherwise return -1.
+ static int InlineRuntimeCallArgumentsCount(Handle<String> name);
private:
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
- virtual ~CodeGenerator() { delete masm_; }
// Accessors.
inline bool is_eval();
- Scope* scope() const { return scope_; }
+ inline Scope* scope();
// Generating deferred code.
void ProcessDeferred();
@@ -183,12 +256,55 @@
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+ // Visit a statement and then spill the virtual frame if control flow can
+ // reach the end of the statement (ie, it does not exit via break,
+ // continue, return, or throw). This function is used temporarily while
+ // the code generator is being transformed.
+ inline void VisitAndSpill(Statement* statement);
+
+ // Visit a list of statements and then spill the virtual frame if control
+ // flow can reach the end of the list.
+ inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
// Main code generation function
- void Generate(CompilationInfo* info, Mode mode);
+ void Generate(CompilationInfo* info);
+
+ // The following are used by class Reference.
+ void LoadReference(Reference* ref);
+ void UnloadReference(Reference* ref);
+
+ MemOperand ContextOperand(Register context, int index) const {
+ return MemOperand(context, Context::SlotOffset(index));
+ }
+
+ MemOperand SlotOperand(Slot* slot, Register tmp);
+
+ // Expressions
+ MemOperand GlobalObject() const {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+ }
+
+ void LoadCondition(Expression* x,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_cc);
+ void Load(Expression* x);
+ void LoadGlobal();
+
+ // Generate code to push the value of an expression on top of the frame
+ // and then spill the frame fully to memory. This function is used
+ // temporarily while the code generator is being transformed.
+ inline void LoadAndSpill(Expression* expression);
+
+ // Read a value from a slot and leave it on top of the expression stack.
+ void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ // Store the value on top of the stack to a slot.
+ void StoreToSlot(Slot* slot, InitState init_state);
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
+ int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@@ -217,7 +333,7 @@
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+ void GenerateArguments(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
@@ -227,13 +343,16 @@
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateCharFromCode(ZoneList<Expression*>* args);
+
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
- void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+ void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
@@ -244,10 +363,11 @@
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast support for Math.sin and Math.cos.
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast call to math functions.
+ void GenerateMathPow(ZoneList<Expression*>* args);
+ void GenerateMathSin(ZoneList<Expression*>* args);
+ void GenerateMathCos(ZoneList<Expression*>* args);
+ void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
@@ -282,7 +402,6 @@
CompilationInfo* info_;
// Code generation state
- Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;
@@ -302,6 +421,7 @@
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
+ friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 772bcc0..cdb35ae 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -104,8 +104,24 @@
}
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ masm->Abort("LiveEdit frame dropping is not supported on mips");
+}
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+ masm->Abort("LiveEdit frame dropping is not supported on mips");
+}
+
#undef __
+
+void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+ Handle<Code> code) {
+ UNREACHABLE();
+}
+const int Debug::kFrameDropperFrameSize = -1;
+
+
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/fast-codegen-mips.cc
index c47f632..48a0ce6 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/fast-codegen-mips.cc
@@ -35,6 +35,14 @@
#define __ ACCESS_MASM(masm_)
+Register FastCodeGenerator::accumulator0() { return no_reg; }
+Register FastCodeGenerator::accumulator1() { return no_reg; }
+Register FastCodeGenerator::scratch0() { return no_reg; }
+Register FastCodeGenerator::scratch1() { return no_reg; }
+Register FastCodeGenerator::receiver_reg() { return no_reg; }
+Register FastCodeGenerator::context_reg() { return no_reg; }
+
+
void FastCodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
@@ -45,7 +53,17 @@
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index d2c717c..cdc880d 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -91,8 +91,7 @@
Address InternalFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
+ return fp() + StandardFrameConstants::kCallerSPOffset;
}
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index ec1949d..06e9979 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -104,7 +104,7 @@
static const int kCallerPCOffset = +1 * kPointerSize;
// FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +4 * kPointerSize;
+ static const int kCallerSPDisplacement = +3 * kPointerSize;
};
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 920329e..3c29e99 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -146,6 +146,11 @@
}
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 5598cdf..8c90921 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -74,6 +74,47 @@
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
+ // Registers:
+ // a2: name
+ // ra: return address
+
+ // Get the receiver of the function from the stack.
+ __ lw(a3, MemOperand(sp, argc*kPointerSize));
+
+ __ EnterInternalFrame();
+
+ // Push the receiver and the name of the function.
+ __ MultiPush(a2.bit() | a3.bit());
+
+ // Call the entry.
+ __ li(a0, Operand(2));
+ __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Move result to a1 and leave the internal frame.
+ __ mov(a1, v0);
+ __ LeaveInternalFrame();
+
+ // Check if the receiver is a global object of some sort.
+ Label invoke, global;
+ __ lw(a2, MemOperand(sp, argc * kPointerSize));
+ __ andi(t0, a2, kSmiTagMask);
+ __ Branch(eq, &invoke, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a2, MemOperand(sp, argc * kPointerSize));
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ bind(&invoke);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION);
}
// Defined in ic.cc.
@@ -90,11 +131,6 @@
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
@@ -120,11 +156,6 @@
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
@@ -145,12 +176,6 @@
}
-void KeyedStoreIC::Generate(MacroAssembler* masm,
- const ExternalReference& f) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@@ -162,7 +187,12 @@
}
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@@ -172,12 +202,12 @@
}
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
index 3301d19..4bd9102 100644
--- a/src/mips/jump-target-mips.cc
+++ b/src/mips/jump-target-mips.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
@@ -41,7 +42,37 @@
#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(cgen()->has_valid_frame());
+ // Live non-frame registers are not allowed at unconditional jumps
+ // because we have no way of invalidating the corresponding results
+ // which are still live in the C++ code.
+ ASSERT(cgen()->HasValidEntryRegisters());
+
+ if (is_bound()) {
+ // Backward jump. There is already a frame expectation at the target.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ } else {
+ // Use the current frame as the expected one at the target if necessary.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = cgen()->frame();
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ } else {
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ }
+
+ // The predicate is_linked() should be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ if (!is_linked()) {
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+ }
+ }
+ __ b(&entry_label_);
+ __ nop(); // Branch delay slot nop.
}
@@ -56,12 +87,47 @@
void JumpTarget::DoBind() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+ if (cgen()->has_valid_frame()) {
+ // If there is a current frame we can use it on the fall through.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ } else {
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ }
+ } else {
+ // If there is no current frame we must have an entry frame which we can
+ // copy.
+ ASSERT(entry_frame_ != NULL);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ }
+
+ // The predicate is_linked() should be made false. Its implementation
+ // detects the presence (or absence) of frame pointers in the
+ // reaching_frames_ list. If we inserted a bogus frame to make
+ // is_linked() true, remove it now.
+ if (is_linked()) {
+ reaching_frames_.Clear();
+ }
+
+ __ bind(&entry_label_);
}
void BreakTarget::Jump() {
- UNIMPLEMENTED_MIPS();
+ // On MIPS we do not currently emit merge code for jumps, so we need to do
+ // it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->Drop(count);
+ DoJump();
}
@@ -71,7 +137,26 @@
void BreakTarget::Bind() {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even
+ // on the fall through. This is so we can bind the return target
+ // with state on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ // On MIPS we do not currently emit merge code at binding sites, so we need
+ // to do it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ cgen()->frame()->Drop(count);
+ }
+
+ DoBind();
}
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index b733bdd..c276af5 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -55,7 +55,7 @@
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(target), cond, r1, r2);
+ Jump(Operand(target, rmode), cond, r1, r2);
}
@@ -81,7 +81,7 @@
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target), cond, r1, r2);
+ Call(Operand(target, rmode), cond, r1, r2);
}
@@ -106,7 +106,7 @@
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
- lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+ lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::LoadRoot(Register destination,
@@ -114,8 +114,7 @@
Condition cond,
Register src1, const Operand& src2) {
Branch(NegateCondition(cond), 2, src1, src2);
- nop();
- lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+ lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -320,7 +319,6 @@
}
-// load wartd in a register
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
@@ -372,7 +370,7 @@
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
- for (int16_t i = 0; i < kNumRegisters; i++) {
+ for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
@@ -385,7 +383,7 @@
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
@@ -396,7 +394,7 @@
void MacroAssembler::MultiPop(RegList regs) {
int16_t NumSaved = 0;
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
@@ -408,7 +406,7 @@
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t NumSaved = 0;
- for (int16_t i = 0; i < kNumRegisters; i++) {
+ for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
@@ -422,7 +420,7 @@
// Trashes the at register if no scratch register is provided.
void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
@@ -484,12 +482,14 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -550,6 +550,8 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -559,7 +561,7 @@
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -629,12 +631,14 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
- Register r2;
+ Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -704,6 +708,8 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -714,7 +720,6 @@
jr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
jr(target.rm());
}
} else { // !target.is_reg()
@@ -723,20 +728,20 @@
j(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- j(target.imm32_); // will generate only one instruction.
+ j(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
- li(at, rt);
+ li(at, target);
if (cond == cc_always) {
jr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- jr(at); // will generate only one instruction.
+ jr(at); // Will generate only one instruction.
}
}
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -747,7 +752,6 @@
jalr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
jalr(target.rm());
}
} else { // !target.is_reg()
@@ -756,20 +760,20 @@
jal(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- jal(target.imm32_); // will generate only one instruction.
+ jal(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
- li(at, rt);
+ li(at, target);
if (cond == cc_always) {
jalr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- jalr(at); // will generate only one instruction.
+ jalr(at); // Will generate only one instruction.
}
}
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
@@ -787,12 +791,73 @@
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void MacroAssembler::DebugBreak() {
+ UNIMPLEMENTED_MIPS();
+ }
+#endif
+
+
// ---------------------------------------------------------------------------
// Exception handling
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
- UNIMPLEMENTED_MIPS();
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ // The return address is passed in register ra.
+ if (try_location == IN_JAVASCRIPT) {
+ if (type == TRY_CATCH_HANDLER) {
+ li(t0, Operand(StackHandler::TRY_CATCH));
+ } else {
+ li(t0, Operand(StackHandler::TRY_FINALLY));
+ }
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+ && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ // Save the current handler as the next handler.
+ LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ lw(t1, MemOperand(t2));
+
+ addiu(sp, sp, -StackHandlerConstants::kSize);
+ sw(ra, MemOperand(sp, 12));
+ sw(fp, MemOperand(sp, 8));
+ sw(t0, MemOperand(sp, 4));
+ sw(t1, MemOperand(sp, 0));
+
+ // Link this handler as the new current one.
+ sw(sp, MemOperand(t2));
+
+ } else {
+ // Must preserve a0-a3, and s0 (argv).
+ ASSERT(try_location == IN_JS_ENTRY);
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+ && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ // The frame pointer does not point to a JS frame so we save NULL
+ // for fp. We expect the code throwing an exception to check fp
+ // before dereferencing it to restore the context.
+ li(t0, Operand(StackHandler::ENTRY));
+
+ // Save the current handler as the next handler.
+ LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ lw(t1, MemOperand(t2));
+
+ addiu(sp, sp, -StackHandlerConstants::kSize);
+ sw(ra, MemOperand(sp, 12));
+ sw(zero_reg, MemOperand(sp, 8));
+ sw(t0, MemOperand(sp, 4));
+ sw(t1, MemOperand(sp, 0));
+
+ // Link this handler as the new current one.
+ sw(sp, MemOperand(t2));
+ }
}
@@ -802,12 +867,233 @@
-// ---------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// Activation frames
+void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
+ Label extra_push, end;
+
+ andi(scratch, sp, 7);
+
+ // We check for args and receiver size on the stack, all of them word sized.
+ // We add one for sp, that we also want to store on the stack.
+ if (((arg_count + 1) % kPointerSizeLog2) == 0) {
+ Branch(ne, &extra_push, at, Operand(zero_reg));
+ } else { // ((arg_count + 1) % 2) == 1
+ Branch(eq, &extra_push, at, Operand(zero_reg));
+ }
+
+ // Save sp on the stack.
+ mov(scratch, sp);
+ Push(scratch);
+ b(&end);
+
+ // Align before saving sp on the stack.
+ bind(&extra_push);
+ mov(scratch, sp);
+ addiu(sp, sp, -8);
+ sw(scratch, MemOperand(sp));
+
+ // The stack is aligned and sp is stored on the top.
+ bind(&end);
+}
+
+
+void MacroAssembler::ReturnFromAlignedCall() {
+ lw(sp, MemOperand(sp));
+}
+
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag) {
+ bool definitely_matches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // setup registers according to contract with ArgumentsAdaptorTrampoline:
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+ // a3: callee code entry
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(a0));
+ ASSERT(expected.is_immediate() || expected.reg().is(a2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ li(a0, Operand(actual.immediate()));
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ li(a2, Operand(expected.immediate()));
+ }
+ }
+ } else if (actual.is_immediate()) {
+ Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.immediate()));
+ li(a0, Operand(actual.immediate()));
+ } else {
+ Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.reg()));
+ }
+
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ li(a3, Operand(code_constant));
+ addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+ if (flag == CALL_FUNCTION) {
+ CallBuiltin(adaptor);
+ b(done);
+ nop();
+ } else {
+ JumpToBuiltin(adaptor);
+ }
+ bind(®ular_invoke);
+ }
+}
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code);
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code, rmode);
+ } else {
+ Jump(code, rmode);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+ Register expected_reg = a2;
+ Register code_reg = a3;
+
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ lw(expected_reg,
+ FieldMemOperand(code_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ lw(code_reg,
+ MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+ void MacroAssembler::GetObjectType(Register function,
+ Register map,
+ Register type_reg) {
+ lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ }
+
+
+ void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
+ // Load builtin address.
+ LoadExternalReference(t9, builtin_entry);
+ lw(t9, MemOperand(t9)); // Deref address.
+ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ // Call and allocate argument slots.
+ jalr(t9);
+ // Use the branch delay slot to allocate argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+ }
+
+
+ void MacroAssembler::CallBuiltin(Register target) {
+ // Target already holds target address.
+ // Call and allocate argument slots.
+ jalr(target);
+ // Use the branch delay slot to allocate argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+ }
+
+
+ void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
+ // Load builtin address.
+ LoadExternalReference(t9, builtin_entry);
+ lw(t9, MemOperand(t9)); // Deref address.
+ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ // Jump and allocate argument slots.
+ jr(t9);
+ // Use the branch delay slot to allocate argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ }
+
+
+ void MacroAssembler::JumpToBuiltin(Register target) {
+ // t9 already holds target address.
+ // Jump and allocate argument slots.
+ jr(t9);
+ // Use the branch delay slot to allocate argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ }
+
+
+// -----------------------------------------------------------------------------
+// Runtime calls
+
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
}
@@ -816,24 +1102,56 @@
}
+void MacroAssembler::IllegalOperation(int num_arguments) {
+ if (num_arguments > 0) {
+ addiu(sp, sp, num_arguments * kPointerSize);
+ }
+ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
- UNIMPLEMENTED_MIPS();
+ // All parameters are on the stack. v0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ IllegalOperation(num_arguments);
+ return;
+ }
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, num_arguments);
+ LoadExternalReference(a1, ExternalReference(f));
+ CEntryStub stub(1);
+ CallStub(&stub);
}
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size) {
UNIMPLEMENTED_MIPS();
}
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- UNIMPLEMENTED_MIPS();
+ TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
-void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
UNIMPLEMENTED_MIPS();
}
@@ -874,6 +1192,8 @@
}
+// -----------------------------------------------------------------------------
+// Debugging
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
@@ -891,5 +1211,113 @@
UNIMPLEMENTED_MIPS();
}
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ addiu(sp, sp, -5 * kPointerSize);
+ li(t0, Operand(Smi::FromInt(type)));
+ li(t1, Operand(CodeObject()));
+ sw(ra, MemOperand(sp, 4 * kPointerSize));
+ sw(fp, MemOperand(sp, 3 * kPointerSize));
+ sw(cp, MemOperand(sp, 2 * kPointerSize));
+ sw(t0, MemOperand(sp, 1 * kPointerSize));
+ sw(t1, MemOperand(sp, 0 * kPointerSize));
+ addiu(fp, sp, 3 * kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ mov(sp, fp);
+ lw(fp, MemOperand(sp, 0 * kPointerSize));
+ lw(ra, MemOperand(sp, 1 * kPointerSize));
+ addiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
+ Register hold_argc,
+ Register hold_argv,
+ Register hold_function) {
+ // Compute the argv pointer and keep it in a callee-saved register.
+ // a0 is argc.
+ sll(t0, a0, kPointerSizeLog2);
+ add(hold_argv, sp, t0);
+ addi(hold_argv, hold_argv, -kPointerSize);
+
+ // Compute callee's stack pointer before making changes and save it as
+ // t1 register so that it is restored as sp register on exit, thereby
+ // popping the args.
+ // t1 = sp + kPointerSize * #args
+ add(t1, sp, t0);
+
+ // Align the stack at this point.
+ AlignStack(0);
+
+ // Save registers.
+ addiu(sp, sp, -12);
+ sw(t1, MemOperand(sp, 8));
+ sw(ra, MemOperand(sp, 4));
+ sw(fp, MemOperand(sp, 0));
+ mov(fp, sp); // Setup new frame pointer.
+
+ // Push debug marker.
+ if (mode == ExitFrame::MODE_DEBUG) {
+ Push(zero_reg);
+ } else {
+ li(t0, Operand(CodeObject()));
+ Push(t0);
+ }
+
+ // Save the frame pointer and the context in top.
+ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ sw(fp, MemOperand(t0));
+ LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+ sw(cp, MemOperand(t0));
+
+ // Setup argc and the builtin function in callee-saved registers.
+ mov(hold_argc, a0);
+ mov(hold_function, a1);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+ // Clear top frame.
+ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ sw(zero_reg, MemOperand(t0));
+
+ // Restore current context from top and clear it in debug mode.
+ LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+ lw(cp, MemOperand(t0));
+#ifdef DEBUG
+ sw(a3, MemOperand(t0));
+#endif
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, fp); // Respect ABI stack constraint.
+ lw(fp, MemOperand(sp, 0));
+ lw(ra, MemOperand(sp, 4));
+ lw(sp, MemOperand(sp, 8));
+ jr(ra);
+ nop(); // Branch delay slot nop.
+}
+
+
+void MacroAssembler::AlignStack(int offset) {
+ // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
+ // and an offset of 1 aligns to 4 modulo 8 bytes.
+ int activation_frame_alignment = OS::ActivationFrameAlignment();
+ if (activation_frame_alignment != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(activation_frame_alignment == 2 * kPointerSize);
+ if (offset == 0) {
+ andi(t0, sp, activation_frame_alignment - 1);
+ Push(zero_reg, eq, t0, zero_reg);
+ } else {
+ andi(t0, sp, activation_frame_alignment - 1);
+ addiu(t0, t0, -4);
+ Push(zero_reg, eq, t0, zero_reg);
+ }
+ }
+}
+
} } // namespace v8::internal
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index aea9836..0f0365b 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -41,6 +41,7 @@
// unless we know exactly what we do.
// Registers aliases
+// cp is assumed to be a callee saved register.
const Register cp = s7; // JavaScript context pointer
const Register fp = s8_fp; // Alias fp
@@ -102,10 +103,10 @@
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it used by v8, for example in
// CodeGenerator::ProcessDeferred().
+ // Currently the branch delay slot is filled by the MacroAssembler.
// Use rather b(Label) for code generation.
void jmp(Label* L) {
Branch(cc_always, L);
- nop();
}
// Load an object from the root table.
@@ -115,11 +116,12 @@
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- // Sets the remembered set bit for [address+offset], where address is the
- // address of the heap object 'object'. The address must be in the first 8K
- // of an allocated page. The 'scratch' register is used in the
- // implementation and all 3 registers are clobbered by the operation, as
- // well as the ip register.
+ // Load an external reference.
+ void LoadExternalReference(Register reg, ExternalReference ext) {
+ li(reg, Operand(ext));
+ }
+
+ // Sets the remembered set bit for [address+offset].
void RecordWrite(Register object, Register offset, Register scratch);
@@ -182,19 +184,8 @@
// Push multiple registers on the stack.
- // With MultiPush, lower registers are pushed first on the stack.
- // For example if you push t0, t1, s0, and ra you get:
- // | |
- // |-----------------------|
- // | t0 | +
- // |-----------------------| |
- // | t1 | |
- // |-----------------------| |
- // | s0 | v
- // |-----------------------| -
- // | ra |
- // |-----------------------|
- // | |
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
void Push(Register src) {
@@ -206,7 +197,6 @@
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(cond, 3, tst1, Operand(tst2));
- nop();
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
@@ -225,11 +215,71 @@
// ---------------------------------------------------------------------------
+ // Activation frames
+
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ // Enter specific kind of exit frame; either EXIT or
+ // EXIT_DEBUG. Expects the number of arguments in register a0 and
+ // the builtin function to call in register a1.
+ // On output hold_argc, hold_function, and hold_argv are setup.
+ void EnterExitFrame(ExitFrame::Mode mode,
+ Register hold_argc,
+ Register hold_argv,
+ Register hold_function);
+
+ // Leave the current exit frame. Expects the return value in v0.
+ void LeaveExitFrame(ExitFrame::Mode mode);
+
+ // Align the stack by optionally pushing a Smi zero.
+ void AlignStack(int offset);
+
+ void SetupAlignedCall(Register scratch, int arg_count = 0);
+ void ReturnFromAlignedCall();
+
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
+ void InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void SaveRegistersToMemory(RegList regs);
+ void RestoreRegistersFromMemory(RegList regs);
+ void CopyRegistersFromMemoryToStack(Register base, RegList regs);
+ void CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs);
+ void DebugBreak();
+#endif
+
+
+ // ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
- // The return address must be passed in register lr.
- // On exit, r0 contains TOS (code slot).
+ // The return address must be passed in register ra.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
@@ -240,6 +290,10 @@
// ---------------------------------------------------------------------------
// Support functions.
+ void GetObjectType(Register function,
+ Register map,
+ Register type_reg);
+
inline void BranchOnSmi(Register value, Label* smi_label,
Register scratch = at) {
ASSERT_EQ(0, kSmiTag);
@@ -255,6 +309,15 @@
Branch(ne, not_smi_label, scratch, Operand(zero_reg));
}
+ void CallBuiltin(ExternalReference builtin_entry);
+ void CallBuiltin(Register target);
+ void JumpToBuiltin(ExternalReference builtin_entry);
+ void JumpToBuiltin(Register target);
+
+ // Generates code for reporting that an illegal operation has
+ // occurred.
+ void IllegalOperation(int num_arguments);
+
// ---------------------------------------------------------------------------
// Runtime calls
@@ -268,21 +331,25 @@
void StubReturn(int argc);
// Call a runtime routine.
- // Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
- // Like JumpToRuntime, but also takes care of passing the number
+ // Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
- void TailCallRuntime(const ExternalReference& ext,
+ void TailCallExternalReference(const ExternalReference& ext,
+ int num_arguments,
+ int result_size);
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Jump to the builtin routine.
- void JumpToRuntime(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@@ -339,20 +406,33 @@
bool allow_stub_calls() { return allow_stub_calls_; }
private:
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag);
+
+ // Get the code for the given builtin. Returns if able to resolve
+ // the function in the 'resolved' flag.
+ Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ // Activation support.
+ // EnterFrame clobbers t0 and t1.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
};
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 2e2dc86..bdb3b7f 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -31,7 +31,7 @@
#include "disasm.h"
#include "assembler.h"
-#include "globals.h" // Need the bit_cast
+#include "globals.h" // Need the BitCast
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
@@ -139,7 +139,7 @@
sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
Debug();
}
-#endif // def GENERATED_CODE_COVERAGE
+#endif // GENERATED_CODE_COVERAGE
int32_t Debugger::GetRegisterValue(int regnum) {
@@ -604,7 +604,7 @@
void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+ *v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
}
@@ -625,7 +625,7 @@
double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *v8i::bit_cast<double*, int32_t*>(
+ return *v8i::BitCast<double*, int32_t*>(
const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
@@ -901,7 +901,7 @@
break;
case MFHC1:
fp_out = get_fpu_register_double(fs_reg);
- alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
+ alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out);
break;
case MTC1:
case MTHC1:
@@ -1644,5 +1644,5 @@
} } // namespace assembler::mips
-#endif // !defined(__mips)
+#endif // __mips
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index a87a49b..0b2d2c3 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -72,20 +72,6 @@
}
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x249);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -99,7 +85,6 @@
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@@ -120,18 +105,6 @@
#define __ ACCESS_MASM(masm())
-Register StubCompiler::CheckPrototypes(JSObject* object,
- Register object_reg,
- JSObject* holder,
- Register holder_reg,
- Register scratch,
- String* name,
- Label* miss) {
- UNIMPLEMENTED_MIPS();
- return at; // UNIMPLEMENTED RETURN
-}
-
-
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
@@ -187,15 +160,58 @@
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ // Registers:
+ // a1: function
+ // ra: return address
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+ // Preserve the function.
+ __ Push(a1);
+ // Setup aligned call.
+ __ SetupAlignedCall(t0, 1);
+ // Push the function on the stack as the argument to the runtime function.
+ __ Push(a1);
+ // Call the runtime function
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ __ ReturnFromAlignedCall();
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Restore saved function.
+ __ Pop(a1);
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+
+ return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+
+Object* CallStubCompiler::CompileCallField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
-Object* CallStubCompiler::CompileCallField(Object* object,
- JSObject* holder,
- int index,
- String* name) {
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
@@ -211,7 +227,7 @@
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
index fad7ec4..c2116de 100644
--- a/src/mips/virtual-frame-mips.cc
+++ b/src/mips/virtual-frame-mips.cc
@@ -32,6 +32,7 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
+#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
@@ -41,17 +42,6 @@
#define __ ACCESS_MASM(masm())
-
-// On entry to a function, the virtual frame already contains the
-// receiver and the parameters. All initial frame elements are in
-// memory.
-VirtualFrame::VirtualFrame()
- : elements_(parameter_count() + local_count() + kPreallocatedElements),
- stack_pointer_(parameter_count()) { // 0-based index of TOS.
- UNIMPLEMENTED_MIPS();
-}
-
-
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
}
@@ -63,7 +53,12 @@
void VirtualFrame::SyncRange(int begin, int end) {
- UNIMPLEMENTED_MIPS();
+ // All elements are in memory on MIPS (ie, synced).
+#ifdef DEBUG
+ for (int i = begin; i <= end; i++) {
+ ASSERT(elements_[i].is_synced());
+ }
+#endif
}
@@ -73,7 +68,13 @@
void VirtualFrame::Enter() {
- UNIMPLEMENTED_MIPS();
+ // TODO(MIPS): Implement DEBUG
+
+ // We are about to push four values to the frame.
+ Adjust(4);
+ __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
+ // Adjust FP to point to saved FP.
+ __ addiu(fp, sp, 2 * kPointerSize);
}
@@ -83,7 +84,17 @@
void VirtualFrame::AllocateStackSlots() {
- UNIMPLEMENTED_MIPS();
+ int count = local_count();
+ if (count > 0) {
+ Comment cmnt(masm(), "[ Allocate space for locals");
+ Adjust(count);
+ // Initialize stack slots with 'undefined' value.
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ addiu(sp, sp, -count * kPointerSize);
+ for (int i = 0; i < count; i++) {
+ __ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize));
+ }
+ }
}
@@ -138,12 +149,16 @@
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- UNIMPLEMENTED_MIPS();
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- UNIMPLEMENTED_MIPS();
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(id, arg_count);
}
@@ -165,16 +180,37 @@
}
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
- UNIMPLEMENTED_MIPS();
+ switch (code->kind()) {
+ case Code::CALL_IC:
+ break;
+ case Code::FUNCTION:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case Code::KEYED_LOAD_IC:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case Code::LOAD_IC:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case Code::KEYED_STORE_IC:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case Code::STORE_IC:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case Code::BUILTIN:
+ UNIMPLEMENTED_MIPS();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ Forget(dropped_args);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
}
@@ -197,7 +233,24 @@
void VirtualFrame::Drop(int count) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(count >= 0);
+ ASSERT(height() >= count);
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+ // Emit code to lower the stack pointer if necessary.
+ if (num_virtual_elements < count) {
+ int num_dropped = count - num_virtual_elements;
+ stack_pointer_ -= num_dropped;
+ __ addiu(sp, sp, num_dropped * kPointerSize);
+ }
+
+ // Discard elements from the virtual frame and free any registers.
+ for (int i = 0; i < count; i++) {
+ FrameElement dropped = elements_.RemoveLast();
+ if (dropped.is_register()) {
+ Unuse(dropped.reg());
+ }
+ }
}
@@ -209,27 +262,50 @@
Result VirtualFrame::Pop() {
UNIMPLEMENTED_MIPS();
Result res = Result();
- return res; // UNIMPLEMENTED RETUR
+ return res; // UNIMPLEMENTED RETURN
}
void VirtualFrame::EmitPop(Register reg) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ Pop(reg);
}
+
void VirtualFrame::EmitMultiPop(RegList regs) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(stack_pointer_ == element_count() - 1);
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ stack_pointer_--;
+ elements_.RemoveLast();
+ }
+ }
+ __ MultiPop(regs);
}
void VirtualFrame::EmitPush(Register reg) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+ stack_pointer_++;
+ __ Push(reg);
}
+
void VirtualFrame::EmitMultiPush(RegList regs) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(stack_pointer_ == element_count() - 1);
+ for (int16_t i = kNumRegisters; i > 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+ stack_pointer_++;
+ }
+ }
+ __ MultiPush(regs);
}
+
void VirtualFrame::EmitArgumentSlots(RegList reglist) {
UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index 79f973f..b32e2ae 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -39,18 +39,18 @@
// -------------------------------------------------------------------------
// Virtual frames
//
-// The virtual frame is an abstraction of the physical stack frame. It
+// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
+// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
+ // expected to remain spilled. The constructor spills the code
// generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
+ // to stay spilled. It is intended as documentation while the code
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
@@ -61,16 +61,17 @@
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- VirtualFrame();
+ inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
- explicit VirtualFrame(VirtualFrame* original);
+ explicit inline VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index);
+ FrameElement CopyElementAt(int index,
+ NumberInfo info = NumberInfo::Unknown());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -104,12 +105,12 @@
}
// Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
+ // frame (eg, the frame after an exception handler is pushed). No code is
// emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted.
+ // the frame after a runtime call). No code is emitted.
void Forget(int count) {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == element_count() - 1);
@@ -120,7 +121,7 @@
}
// Forget count elements from the top of the frame and adjust the stack
- // pointer downward. This is used, for example, before merging frames at
+ // pointer downward. This is used, for example, before merging frames at
// break, continue, and return targets.
void ForgetElements(int count);
@@ -132,24 +133,24 @@
if (is_used(reg)) SpillElementAt(register_location(reg));
}
- // Spill all occurrences of an arbitrary register if possible. Return the
+ // Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
+ // code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
+ // frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected);
- // Detach a frame from its code generator, perhaps temporarily. This
+ // Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
+ // registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
@@ -158,7 +159,7 @@
}
}
- // (Re)attach a frame to its code generator. This informs the register
+ // (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
@@ -169,17 +170,17 @@
}
}
- // Emit code for the physical JS entry and exit frame sequences. After
+ // Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
+ // Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by spilling locals and
- // dropping all non-locals elements in the virtual frame. This
+ // dropping all non-locals elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
+ // shared return site. Emits code for spills.
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
@@ -193,11 +194,11 @@
return MemOperand(sp, index * kPointerSize);
}
- // Random-access store to a frame-top relative frame element. The result
+ // Random-access store to a frame-top relative frame element. The result
// becomes owned by the frame and is invalidated.
void SetElementAt(int index, Result* value);
- // Set a frame element to a constant. The index is frame-top relative.
+ // Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value);
SetElementAt(index, &temp);
@@ -220,13 +221,13 @@
}
// Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
+ // the local slot. The slot should be written to before trying to read
// from it again.
void TakeLocalAt(int index) {
TakeFrameSlotAt(local0_index() + index);
}
- // Store the top value on the virtual frame into a local frame slot. The
+ // Store the top value on the virtual frame into a local frame slot. The
// value is left in place on top of the frame.
void StoreToLocalAt(int index) {
StoreToFrameSlotAt(local0_index() + index);
@@ -266,7 +267,7 @@
}
// Push the value of a paramter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
+ // invalidate the parameter slot. The slot should be written to before
// trying to read from it again.
void TakeParameterAt(int index) {
TakeFrameSlotAt(param0_index() + index);
@@ -291,12 +292,8 @@
RawCallStub(stub);
}
- // Call stub that expects its argument in r0. The argument is given
- // as a result which must be the register r0.
void CallStub(CodeStub* stub, Result* arg);
- // Call stub that expects its arguments in r1 and r0. The arguments
- // are given as results which must be the appropriate registers.
void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
// Call runtime given the number of arguments expected on (and
@@ -316,7 +313,7 @@
int arg_count);
// Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
+ // from the stack. Register arguments are passed as results and
// consumed by the call.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
@@ -332,8 +329,8 @@
int dropped_args,
bool set_auto_args_slots = false);
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
+ // Drop a number of elements from the top of the expression stack. May
+ // emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
// Similar to VirtualFrame::Drop but we don't modify the actual stack.
@@ -347,7 +344,7 @@
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); }
- // Pop an element from the top of the expression stack. Returns a
+ // Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();
@@ -355,20 +352,20 @@
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Same but for multiple registers
- void EmitMultiPop(RegList regs); // higher indexed registers popped first
- void EmitMultiPopReversed(RegList regs); // lower first
+ void EmitMultiPop(RegList regs);
+ void EmitMultiPopReversed(RegList regs);
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
// Same but for multiple registers.
- void EmitMultiPush(RegList regs); // lower indexed registers are pushed first
- void EmitMultiPushReversed(RegList regs); // higher first
+ void EmitMultiPush(RegList regs);
+ void EmitMultiPushReversed(RegList regs);
// Push an element on the virtual frame.
- void Push(Register reg);
- void Push(Handle<Object> value);
- void Push(Smi* value) { Push(Handle<Object>(value)); }
+ inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
+ inline void Push(Handle<Object> value);
+ inline void Push(Smi* value);
// Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) {
@@ -383,13 +380,16 @@
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- void Nip(int num_dropped);
+ // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+ inline void Nip(int num_dropped);
// This pushes 4 arguments slots on the stack and saves asked 'a' registers
// 'a' registers are arguments register a0 to a3.
void EmitArgumentSlots(RegList reglist);
+ inline void SetTypeForLocalAt(int index, NumberInfo info);
+ inline void SetTypeForParamAt(int index, NumberInfo info);
+
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
@@ -413,23 +413,23 @@
int local_count() { return cgen()->scope()->num_stack_slots(); }
// The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
+ // (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
int frame_pointer() { return parameter_count() + 3; }
- // The index of the first parameter. The receiver lies below the first
+ // The index of the first parameter. The receiver lies below the first
// parameter.
int param0_index() { return 1; }
- // The index of the context slot in the frame. It is immediately
+ // The index of the context slot in the frame. It is immediately
// below the frame pointer.
int context_index() { return frame_pointer() - 1; }
- // The index of the function slot in the frame. It is below the frame
+ // The index of the function slot in the frame. It is below the frame
// pointer and context slot.
int function_index() { return frame_pointer() - 2; }
- // The index of the first local. Between the frame pointer and the
+ // The index of the first local. Between the frame pointer and the
// locals lies the return address.
int local0_index() { return frame_pointer() + 2; }
@@ -444,7 +444,7 @@
return (frame_pointer() - index) * kPointerSize;
}
- // Record an occurrence of a register in the virtual frame. This has the
+ // Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
@@ -453,7 +453,7 @@
cgen()->allocator()->Use(reg);
}
- // Record that a register reference has been dropped from the frame. This
+ // Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
void Unuse(Register reg) {
@@ -467,7 +467,7 @@
// constant.
void SpillElementAt(int index);
- // Sync the element at a particular index. If it is a register or
+ // Sync the element at a particular index. If it is a register or
// constant that disagrees with the value on the stack, write it to memory.
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
@@ -483,7 +483,7 @@
// Push a copy of a frame slot (typically a local or parameter) on top of
// the frame.
- void PushFrameSlotAt(int index);
+ inline void PushFrameSlotAt(int index);
// Push a the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
@@ -494,7 +494,7 @@
void StoreToFrameSlotAt(int index);
// Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
+ // on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
@@ -515,14 +515,14 @@
// Make the memory-to-register and constant-to-register moves
// needed to make this frame equal the expected frame.
// Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
+ // moves have been made. After this function returns, the frames
// should be equal.
void MergeMoveMemoryToRegisters(VirtualFrame* expected);
// Invalidates a frame slot (puts an invalid frame element in it).
// Copies on the frame are correctly handled, and if this slot was
// the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
+ // is returned. Otherwise, returns kIllegalIndex.
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
@@ -534,7 +534,7 @@
// (via PrepareForCall).
void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
- bool Equals(VirtualFrame* other);
+ inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;