Update V8 to version 4.1.0.21
This is a cherry-pick of all commits up to and including the
4.1.0.21 cherry-pick in Chromium.
Original commit message:
Version 4.1.0.21 (cherry-pick)
Merged 206e9136bde0f2b5ae8cb77afbb1e7833e5bd412
Unlink pages from the space page list after evacuation.
BUG=430201
LOG=N
R=jkummerow@chromium.org
Review URL: https://codereview.chromium.org/953813002
Cr-Commit-Position: refs/branch-heads/4.1@{#22}
Cr-Branched-From: 2e08d2a7aa9d65d269d8c57aba82eb38a8cb0a18-refs/heads/candidates@{#25353}
---
FPIIM-449
Change-Id: I8c23c7bbb70772b4858fe8a47b64fa97ee0d1f8c
diff --git a/src/compiler/mips64/OWNERS b/src/compiler/mips64/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/compiler/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
new file mode 100644
index 0000000..dee7705
--- /dev/null
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -0,0 +1,1444 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/mips/macro-assembler-mips.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(plind): Possibly avoid using these Lithium names.
+#define kScratchReg kLithiumScratchReg
+#define kScratchReg2 kLithiumScratchReg2
+#define kScratchDoubleReg kLithiumScratchDouble
+
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
+ __LINE__)
+
+
+// Adds MIPS64-specific methods to convert InstructionOperands.
+class MipsOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+ MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(int index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(int index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+ // The single- (float) and double-precision register namespaces are the
+ // same on MIPS; both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Operand InputImmediate(int index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+ // Maybe this isn't done on arm due to the const pool?
+ break;
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
+ break;
+ }
+ UNREACHABLE();
+ return Operand(zero_reg);
+ }
+
+ Operand InputOperand(int index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
+ MemOperand MemoryOperand(int* first_index) {
+ const int index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ // TODO(plind): r6 address mode, to be implemented ...
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return MemOperand(no_reg);
+ }
+
+ MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK(op != NULL);
+ DCHECK(!op->IsRegister());
+ DCHECK(!op->IsDoubleRegister());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ Move(result_, std::numeric_limits<float>::quiet_NaN());
+ }
+
+ private:
+ FloatRegister const result_;
+};
+
+
+class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ __ Move(result_, std::numeric_limits<double>::quiet_NaN());
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+ OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL { __ mov(result_, zero_reg); }
+
+ private:
+ Register const result_;
+};
+
+
+class OutOfLineRound : public OutOfLineCode {
+ public:
+ OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of double input already in kScratchReg.
+ __ dsrl(at, kScratchReg, 31);
+ __ dsll(at, at, 31);
+ __ mthc1(at, result_);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+ OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+ OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+ OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+} // namespace
+
+
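+// The checked load/store macros below emit a bounds check before the memory
+// access: out-of-bounds loads branch to an out-of-line stub that produces
+// zero (or NaN for floating-point loads), while out-of-bounds stores simply
+// skip the write.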
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
+ do { \
+ auto result = i.Output##width##Register(); \
+ auto ool = new (zone()) OutOfLineLoad##width(this, result); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+ __ Daddu(at, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(at, 0)); \
+ } else { \
+ auto offset = i.InputOperand(0).immediate(); \
+ __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
+ __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ } \
+ __ bind(ool->exit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+ __ Daddu(at, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(at, 0)); \
+ } else { \
+ auto offset = i.InputOperand(0).immediate(); \
+ __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
+ __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ } \
+ __ bind(ool->exit()); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
+ do { \
+ Label done; \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ auto value = i.Input##width##Register(2); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+ __ Daddu(at, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(at, 0)); \
+ } else { \
+ auto offset = i.InputOperand(0).immediate(); \
+ auto value = i.Input##width##Register(2); \
+ __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
+ __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
+ } \
+ __ bind(&done); \
+ } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ Label done; \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ auto value = i.InputRegister(2); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+ __ Daddu(at, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(at, 0)); \
+ } else { \
+ auto offset = i.InputOperand(0).immediate(); \
+ auto value = i.InputRegister(2); \
+ __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
+ __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
+ } \
+ __ bind(&done); \
+ } while (0)
+
+
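+// Rounds a double to an integral double using the given rounding instruction.
+// Inputs whose exponent is too large to have a fractional part are passed
+// through unchanged; a zero result takes the out-of-line path, which restores
+// the sign bit so that e.g. -0.5 rounds to -0.0 rather than +0.0.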
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
+ do { \
+ auto ool = \
+ new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
+ HeapNumber::kExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+ __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ dmfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Call(at);
+ }
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ }
+
+ __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchStackPointer:
+ __ mov(i.OutputRegister(), sp);
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64Add:
+ __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Dadd:
+ __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Sub:
+ __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Dsub:
+ __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Mul:
+ __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64MulHigh:
+ __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64MulHighU:
+ __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Div:
+ __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64DivU:
+ __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Mod:
+ __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64ModU:
+ __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Dmul:
+ __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Ddiv:
+ __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64DdivU:
+ __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Dmod:
+ __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64DmodU:
+ __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64And:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Or:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Xor:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Shl:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kMips64Shr:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kMips64Sar:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kMips64Ext:
+ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMips64Dext:
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMips64Dshl:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ if (imm < 32) {
+ __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
+ } else {
+ __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ }
+ }
+ break;
+ case kMips64Dshr:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ if (imm < 32) {
+ __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
+ } else {
+ __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ }
+ }
+ break;
+ case kMips64Dsar:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ if (imm < 32) {
+ __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
+ } else {
+ __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ }
+ }
+ break;
+ case kMips64Ror:
+ __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Dror:
+ __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Tst:
+ case kMips64Tst32:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kMips64Cmp:
+ case kMips64Cmp32:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kMips64Mov:
+ // TODO(plind): Should we combine mov/li like this, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ mov(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kMips64CmpD:
+ // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+ break;
+ case kMips64AddD:
+ // TODO(plind): add special case: combine mult & add.
+ __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64SubD:
+ __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64MulD:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64DivD:
+ __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMips64ModD: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(i.OutputDoubleRegister());
+ break;
+ }
+ case kMips64Float64Floor: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ break;
+ }
+ case kMips64Float64Ceil: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ break;
+ }
+ case kMips64Float64RoundTruncate: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ break;
+ }
+ case kMips64SqrtD: {
+ __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMips64CvtSD: {
+ __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMips64CvtDS: {
+ __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kMips64CvtDW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_d_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtDUw: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+ break;
+ }
+ case kMips64TruncWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ // Other arches use round to zero here, so we follow.
+ __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncUwD: {
+ FPURegister scratch = kScratchDoubleReg;
+ // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
+ __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ break;
+ }
+ // ... more basic instructions ...
+
+ case kMips64Lbu:
+ __ lbu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Lb:
+ __ lb(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Sb:
+ __ sb(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMips64Lhu:
+ __ lhu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Lh:
+ __ lh(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Sh:
+ __ sh(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMips64Lw:
+ __ lw(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Ld:
+ __ ld(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Sw:
+ __ sw(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMips64Sd:
+ __ sd(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMips64Lwc1: {
+ __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kMips64Swc1: {
+ int index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ __ swc1(i.InputSingleRegister(index), operand);
+ break;
+ }
+ case kMips64Ldc1:
+ __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kMips64Sdc1:
+ __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+ break;
+ case kMips64Push:
+ __ Push(i.InputRegister(0));
+ break;
+ case kMips64StackClaim: {
+ int words = MiscField::decode(instr->opcode());
+ __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
+ break;
+ }
+ case kMips64StoreToStackSlot: {
+ int slot = MiscField::decode(instr->opcode());
+ __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
+ break;
+ }
+ case kMips64StoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
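+ // Store value at object + index, then record the write so the GC
+ // tracks the new reference.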
+ __ daddu(index, object, index);
+ __ sd(value, MemOperand(index));
+ SaveFPRegsMode mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RAStatus ra_status = kRAHasNotBeenSaved;
+ __ RecordWrite(object, index, value, ra_status, mode);
+ break;
+ }
+ case kCheckedLoadInt8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
+ break;
+ case kCheckedLoadUint8:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
+ break;
+ case kCheckedLoadInt16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
+ break;
+ case kCheckedLoadUint16:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
+ break;
+ case kCheckedLoadWord32:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
+ break;
+ case kCheckedLoadFloat32:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
+ break;
+ case kCheckedLoadFloat64:
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+ break;
+ case kCheckedStoreWord8:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sb);
+ break;
+ case kCheckedStoreWord16:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sh);
+ break;
+ case kCheckedStoreWord32:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sw);
+ break;
+ case kCheckedStoreFloat32:
+ ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
+ break;
+ case kCheckedStoreFloat64:
+ ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+ break;
+ }
+}
+
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ OFStream out(stdout); \
+ out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+ UNIMPLEMENTED();
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ MipsOperandConverter i(this, instr);
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ Condition cc = kNoCondition;
+
+ // MIPS does not have condition code flags, so compare and branch are
+ // implemented differently than on other architectures. The compare
+ // operations emit MIPS pseudo-instructions, which are handled here by
+ // branch instructions that do the actual comparison. It is essential that
+ // the input registers of a compare pseudo-op are not modified before this
+ // branch op, as they are tested here.
+ // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+ // not separated by other instructions.
+
+ if (instr->arch_opcode() == kMips64Tst) {
+ switch (branch->condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Tst, branch->condition);
+ break;
+ }
+ __ And(at, i.InputRegister(0), i.InputOperand(1));
+ __ Branch(tlabel, cc, at, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kMips64Tst32) {
+ switch (branch->condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Tst32, branch->condition);
+ break;
+ }
+ // Zero-extend the registers, since on MIPS64 only a 64-bit operand
+ // branch-and-compare op is available; this is a disadvantage of performing
+ // 32-bit operations on MIPS64. Ideally the front-end would be made to
+ // globally prefer the Word64 representation for MIPS64, even for Word32.
+ __ And(at, i.InputRegister(0), i.InputOperand(1));
+ __ Dext(at, at, 0, 32);
+ __ Branch(tlabel, cc, at, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kMips64Dadd ||
+ instr->arch_opcode() == kMips64Dsub) {
+ switch (branch->condition) {
+ case kOverflow:
+ cc = ne;
+ break;
+ case kNotOverflow:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Dadd, branch->condition);
+ break;
+ }
+
+ __ dsra32(kScratchReg, i.OutputRegister(), 0);
+ __ sra(at, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, at, Operand(kScratchReg));
+ } else if (instr->arch_opcode() == kMips64Cmp) {
+ switch (branch->condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp, branch->condition);
+ break;
+ }
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+
+ } else if (instr->arch_opcode() == kMips64Cmp32) {
+ switch (branch->condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
+ break;
+ }
+
+ switch (branch->condition) {
+ case kEqual:
+ case kNotEqual:
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kSignedLessThanOrEqual:
+ case kSignedGreaterThan:
+ // Sign-extend the registers, since on MIPS64 only a 64-bit operand
+ // branch-and-compare op is available.
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+ }
+ break;
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ // Zero-extend the registers, since on MIPS64 only a 64-bit operand
+ // branch-and-compare op is available.
+ __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+ }
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
+ break;
+ }
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ } else if (instr->arch_opcode() == kMips64CmpD) {
+ // TODO(dusmil): Optimize unordered checks to use fewer instructions, even
+ // if we have to unfold the BranchF macro.
+ Label* nan = flabel;
+ switch (branch->condition) {
+ case kUnorderedEqual:
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ cc = ne;
+ nan = tlabel;
+ break;
+ case kUnorderedLessThan:
+ cc = lt;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ cc = ge;
+ nan = tlabel;
+ break;
+ case kUnorderedLessThanOrEqual:
+ cc = le;
+ break;
+ case kUnorderedGreaterThan:
+ cc = gt;
+ nan = tlabel;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+ break;
+ }
+ __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ MipsOperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ Label false_value;
+ DCHECK_NE(0, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+
+ // MIPS does not have condition code flags, so compare and branch are
+ // implemented differently than on other architectures. The compare
+ // operations emit MIPS pseudo-instructions, which are checked and handled
+ // here.
+
+ // For materializations, we use the branch delay slot to set the result to
+ // true; in the false case, where we fall through the branch, we reset the
+ // result to false.
+
+ if (instr->arch_opcode() == kMips64Tst) {
+ switch (condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Tst, condition);
+ break;
+ }
+ __ And(at, i.InputRegister(0), i.InputOperand(1));
+ __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64Tst32) {
+ switch (condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Tst32, condition);
+ break;
+ }
+ // Zero-extend the register, since on MIPS64 only a 64-bit operand
+ // branch-and-compare op is available.
+ __ And(at, i.InputRegister(0), i.InputOperand(1));
+ __ Dext(at, at, 0, 32);
+ __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64Dadd ||
+ instr->arch_opcode() == kMips64Dsub) {
+ switch (condition) {
+ case kOverflow:
+ cc = ne;
+ break;
+ case kNotOverflow:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Dadd, condition);
+ break;
+ }
+ __ dsra32(kScratchReg, i.OutputRegister(), 0);
+ __ sra(at, i.OutputRegister(), 31);
+ __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64Cmp) {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp, condition);
+ break;
+ }
+ __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64Cmp32) {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, condition);
+ break;
+ }
+
+ switch (condition) {
+ case kEqual:
+ case kNotEqual:
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kSignedLessThanOrEqual:
+ case kSignedGreaterThan:
+ // Sign-extend the registers, since on MIPS64 only a 64-bit operand
+ // branch-and-compare op is available.
+ __ sll(left, left, 0);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+ }
+ break;
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ // Zero-extend the registers, since on MIPS64 only a 64-bit operand
+ // branch-and-compare op is available.
+ __ Dext(left, left, 0, 32);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+ }
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, condition);
+ break;
+ }
+ __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64CmpD) {
+ FPURegister left = i.InputDoubleRegister(0);
+ FPURegister right = i.InputDoubleRegister(1);
+ // TODO(plind): Provide NaN-testing macro-asm function without need for
+ // BranchF.
+ FPURegister dummy1 = f0;
+ FPURegister dummy2 = f2;
+ switch (condition) {
+ case kUnorderedEqual:
+ // TODO(plind): improve the NaN testing throughout this function.
+ __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+ __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
+ cc = ne;
+ break;
+ case kUnorderedLessThan:
+ __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+ cc = lt;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+ __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
+ cc = ge;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+ cc = le;
+ break;
+ case kUnorderedGreaterThan:
+ __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+ __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
+ cc = gt;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64CmpD, condition);
+ break;
+ }
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
+ __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
+ // Fall-thru (branch not taken) returns 0.
+
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+ // Fallthru case is the false materialization.
+ __ bind(&false_value);
+ __ li(result, Operand(static_cast<int64_t>(0)));
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ // TODO(plind): make callee save size const, possibly DCHECK it.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ __ MultiPush(saves);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = this->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ ld(a2, GlobalObjectOperand());
+ __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+ __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ bind(&ok);
+ }
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+ }
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ __ Ret();
+ } else {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
+ __ DropAndRet(pop_count);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ MipsOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ sd(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ld(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ ld(temp, src);
+ __ sd(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
+ case Constant::kInt64:
+ __ li(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat64:
+ __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject:
+ __ li(dst, src.ToHeapObject());
+ break;
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
+ break;
+ }
+ if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsDoubleStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ sw(at, dst);
+ } else {
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ Move(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsDoubleRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ Move(dst, src.ToFloat64());
+ if (destination->IsDoubleStackSlot()) {
+ __ sdc1(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ sdc1(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ ldc1(g.ToDoubleRegister(destination), src);
+ } else {
+ FPURegister temp = kScratchDoubleReg;
+ __ ldc1(temp, src);
+ __ sdc1(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ MipsOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(temp, src);
+ __ ld(src, dst);
+ __ sd(temp, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ Register temp_1 = kScratchReg2;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ ld(temp_0, src);
+ __ ld(temp_1, dst);
+ __ sd(temp_0, dst);
+ __ sd(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ FPURegister temp = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ ldc1(src, dst);
+ __ sdc1(temp, dst);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ DCHECK(destination->IsDoubleStackSlot());
+ Register temp_0 = kScratchReg;
+ FPURegister temp_1 = kScratchDoubleReg;
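+ // Swap the two 64-bit slots: stash the destination in an FP register,
+ // copy the source over it one 32-bit word at a time, then store the
+ // stashed value back into the source slot.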
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ __ ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ lw(temp_0, src1);
+ __ sw(temp_0, dst1);
+ __ sdc1(temp_1, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // Unused on 32-bit ARM. Still exists on 64-bit ARM.
+ // TODO(plind): Unclear when this is called now. Understand, fix if needed.
+ __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
+ }
+ }
+ }
+ MarkLazyDeoptSite();
+}
+
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
new file mode 100644
index 0000000..dd019f9
--- /dev/null
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// MIPS64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64Mul) \
+ V(Mips64MulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64Or) \
+ V(Mips64Xor) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Dext) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Tst32) \
+ V(Mips64Cmp) \
+ V(Mips64Cmp32) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64SqrtD) \
+ V(Mips64Float64Floor) \
+ V(Mips64Float64Ceil) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64TruncUwD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtDUw) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Lhu) \
+ V(Mips64Sh) \
+ V(Mips64Ld) \
+ V(Mips64Lw) \
+ V(Mips64Sw) \
+ V(Mips64Sd) \
+ V(Mips64Lwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Push) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64StackClaim) \
+ V(Mips64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
new file mode 100644
index 0000000..35ad16b
--- /dev/null
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -0,0 +1,1079 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+
+// Adds MIPS64-specific methods for generating InstructionOperands.
+class Mips64OperandGenerator FINAL : public OperandGenerator {
+ public:
+ explicit Mips64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
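+ // Whether the constant node fits in the instruction's immediate field.
+ // Most MIPS immediates are signed 16-bit; shifts take 5- or 6-bit shift
+ // amounts, and xori takes a zero-extended 16-bit immediate.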
+ bool CanBeImmediate(Node* node, InstructionCode opcode) {
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kMips64Shl:
+ case kMips64Sar:
+ case kMips64Shr:
+ return is_uint5(value);
+ case kMips64Dshl:
+ case kMips64Dsar:
+ case kMips64Dshr:
+ return is_uint6(value);
+ case kMips64Xor:
+ return is_uint16(value);
+ case kMips64Ldc1:
+ case kMips64Sdc1:
+ return is_int16(value + kIntSize);
+ default:
+ return is_int16(value);
+ }
+ }
+
+
+ bool CanBeImmediate(Node* node, InstructionCode opcode,
+ FlagsContinuation* cont) {
+ int64_t value;
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kMips64Cmp32:
+ switch (cont->condition()) {
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ // Immediate operands for unsigned 32-bit compare operations
+ // should not be sign-extended.
+ return is_uint15(value);
+ default:
+ return false;
+ }
+ default:
+ return is_int16(value);
+ }
+ }
+
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Mips64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+
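+// Shared helper for binary operations: emits the instruction with a register
+// left operand and a register-or-immediate right operand, adding branch
+// labels or a boolean output as required by the flags continuation.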
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[4];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
+ }
+
+ DCHECK_NE(0, input_count);
+ DCHECK_NE(0, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+ MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kMips64Lwc1;
+ break;
+ case kRepFloat64:
+ opcode = kMips64Ldc1;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+ break;
+ case kRepWord32:
+ opcode = kMips64Lw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kMips64Ld;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand* addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+ Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
+ g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
+ return;
+ }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepFloat32:
+ opcode = kMips64Swc1;
+ break;
+ case kRepFloat64:
+ opcode = kMips64Sdc1;
+ break;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kMips64Sb;
+ break;
+ case kRepWord16:
+ opcode = kMips64Sh;
+ break;
+ case kRepWord32:
+ opcode = kMips64Sw;
+ break;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kMips64Sd;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand* addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
+ g.TempImmediate(0), g.UseRegister(value));
+ }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kMips64And);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ VisitBinop(this, node, kMips64And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kMips64Or);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kMips64Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ VisitBinop(this, node, kMips64Xor);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ VisitBinop(this, node, kMips64Xor);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitRRO(this, kMips64Shl, node);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitRRO(this, kMips64Shr, node);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitRRO(this, kMips64Sar, node);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ VisitRRO(this, kMips64Dshl, node);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ VisitRRO(this, kMips64Dshr, node);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, kMips64Dsar, node);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kMips64Ror, node);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kMips64Dror, node);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ Mips64OperandGenerator g(this);
+ // TODO(plind): Consider multiply & add optimization from arm port.
+ VisitBinop(this, node, kMips64Add);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ Mips64OperandGenerator g(this);
+ // TODO(plind): Consider multiply & add optimization from arm port.
+ VisitBinop(this, node, kMips64Dadd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kMips64Sub);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kMips64Dsub);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
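+ // Strength-reduce multiplication by a constant: a factor of 2^k becomes a
+ // shift, and factors of 2^k + 1 / 2^k - 1 become a shift plus an add/sub.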
+ if (m.right().HasValue() && m.right().Value() > 0) {
+ int32_t value = m.right().Value();
+ if (base::bits::IsPowerOfTwo32(value)) {
+ Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo32(value - 1)) {
+ InstructionOperand* temp = g.TempRegister();
+ Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value - 1)));
+ Emit(kMips64Add | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+ return;
+ }
+ if (base::bits::IsPowerOfTwo32(value + 1)) {
+ InstructionOperand* temp = g.TempRegister();
+ Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value + 1)));
+ Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
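+
+// The strength reduction above turns multiplications by constants of the
+// form 2^k, 2^k + 1 and 2^k - 1 into shifts plus at most one add or sub,
+// e.g. (illustrative only):
+//   x * 8 => x << 3
+//   x * 9 => (x << 3) + x
+//   x * 7 => (x << 3) - x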
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64MulHigh, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand* const dmul_operand = g.TempRegister();
+ Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
+ g.TempImmediate(32));
+}
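+
+// The trailing Ext keeps bits [0..31] of the intermediate result, narrowing
+// it to a proper 32-bit value for the node.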
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ // TODO(dusmil): Add optimization for shifts larger than 32.
+  // Note: the power-of-two helpers below operate on 32-bit values, so
+  // restrict the match to constants that fit in 32 bits; anything larger
+  // falls through to the generic Dmul.
+  if (m.right().HasValue() && m.right().Value() > 0 &&
+      m.right().Value() <= 0x7fffffff) {
+    int64_t value = m.right().Value();
+ if (base::bits::IsPowerOfTwo32(value)) {
+ Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo32(value - 1)) {
+ InstructionOperand* temp = g.TempRegister();
+ Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value - 1)));
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+ return;
+ }
+ if (base::bits::IsPowerOfTwo32(value + 1)) {
+ InstructionOperand* temp = g.TempRegister();
+ Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value + 1)));
+ Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64CvtDUw, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64TruncWD, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64TruncUwD, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
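+
+// On MIPS64, a 32-bit shift with a zero shift amount ("sll rd, rs, 0")
+// sign-extends the low word into the full 64-bit register, which is exactly
+// the int32 -> int64 change wanted here.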
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0), g.TempImmediate(32));
+}
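+
+// Dext with pos 0 and size 32 copies the low word and clears the upper bits,
+// i.e. it zero-extends uint32 -> uint64.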
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0), g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kMips64AddD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kMips64SubD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kMips64MulD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kMips64DivD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64ModD, g.DefineAsFixed(node, f0),
+ g.UseFixed(node->InputAt(0), f12),
+ g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+}
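+
+// There is no MIPS instruction for a floating-point modulus; kMips64ModD is
+// implemented as a call (hence MarkAsCall), with the operands and result
+// pinned to the FP argument/return registers f12, f14 and f0.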
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ VisitRR(this, kMips64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ VisitRR(this, kMips64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kMips64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
+ Mips64OperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+  int push_count = static_cast<int>(buffer.pushed_nodes.size());
+ if (push_count > 0) {
+ Emit(kMips64StackClaim | MiscField::encode(push_count), NULL);
+ }
+  int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
+ Emit(kMips64StoreToStackSlot | MiscField::encode(slot), NULL,
+ g.UseRegister(*input));
+ slot--;
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+ call_instr->MarkAsCall();
+}
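+
+// Call lowering sketch: stack arguments are reserved in one go via
+// kMips64StackClaim and then written slot by slot via kMips64StoreToStackSlot,
+// after which the call itself is emitted and marked as a call so the register
+// allocator handles it with call semantics.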
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ Mips64OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ break;
+ case kRepWord16:
+ opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedLoadWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedLoadFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedLoadFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
+
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), offset_operand, length_operand,
+ g.UseRegister(buffer));
+}
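+
+// A checked load receives (offset, length, buffer); the generated code only
+// performs the load when the offset is in bounds and otherwise produces a
+// default value instead of trapping.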
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+ MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ Mips64OperandGenerator g(this);
+ Node* const buffer = node->InputAt(0);
+ Node* const offset = node->InputAt(1);
+ Node* const length = node->InputAt(2);
+ Node* const value = node->InputAt(3);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kRepWord8:
+ opcode = kCheckedStoreWord8;
+ break;
+ case kRepWord16:
+ opcode = kCheckedStoreWord16;
+ break;
+ case kRepWord32:
+ opcode = kCheckedStoreWord32;
+ break;
+ case kRepFloat32:
+ opcode = kCheckedStoreFloat32;
+ break;
+ case kRepFloat64:
+ opcode = kCheckedStoreFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
+
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
+ length_operand, g.UseRegister(value), g.UseRegister(buffer));
+}
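+
+// Symmetrically, a checked store performs the bounds check first and simply
+// skips the store when the offset is out of bounds.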
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand* left, InstructionOperand* right,
+                  FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
+ cont);
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Mips64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode, cont)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, opcode, cont)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
+}
+
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kMips64Cmp, cont, false);
+}
+
+} // namespace
+
+
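+// Emits a comparison of {value} against zero, either as a branch or as a
+// materialized boolean result, depending on the continuation.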
+void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
+ Node* value, FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ InstructionOperand* const value_operand = g.UseRegister(value);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+ g.TempImmediate(0));
+ }
+}
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+ Node* value, FlagsContinuation* cont) {
+  // Default to the 64-bit variant of the comparison against 0; it is
+  // narrowed to 32 bits below when combining through a Word32Equal user.
+ InstructionCode opcode = kMips64Cmp;
+ while (selector->CanCover(user, value)) {
+ if (user->opcode() == IrOpcode::kWord32Equal) {
+ opcode = kMips64Cmp32;
+ }
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ opcode = kMips64Cmp32;
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord64Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ continue;
+ }
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(selector, value, cont);
+ }
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(selector, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch.
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (result == NULL || selector->IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64Dadd, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64Dsub, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(selector, value, kMips64Tst, cont, true);
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Continuation could not be combined with a compare, emit compare against 0.
+ EmitWordCompareZero(selector, opcode, value, cont);
+}
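+
+// Example of the combining above (illustrative): for a branch on
+// Word32Equal(x, 0) the loop strips the Word32Equal, negates the
+// continuation and switches to the 32-bit compare, so a single compare of x
+// against zero is emitted instead of first materializing a boolean.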
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64Dadd, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64Dadd, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64Dsub, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64Dsub, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
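+// These flags advertise that Float64 floor, ceil and truncating round are
+// supported as single instructions, so the generic fallback lowering for
+// these operations is not needed.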
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/mips64/linkage-mips64.cc b/src/compiler/mips64/linkage-mips64.cc
new file mode 100644
index 0000000..0e1a590
--- /dev/null
+++ b/src/compiler/mips64/linkage-mips64.cc
@@ -0,0 +1,71 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct MipsLinkageHelperTraits {
+ static Register ReturnValueReg() { return v0; }
+ static Register ReturnValue2Reg() { return v1; }
+ static Register JSCallFunctionReg() { return a1; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return a1; }
+ static Register RuntimeCallArgCountReg() { return a0; }
+ static RegList CCalleeSaveRegisters() {
+ return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+ s6.bit() | s7.bit();
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 8; }
+};
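+
+// These register assignments follow the MIPS n64 ABI: a0-a7 carry the first
+// eight integer arguments, v0/v1 hold the return values, and s0-s7 are
+// callee-saved.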
+
+
+typedef LinkageHelper<MipsLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+ CallDescriptor::Flags flags) {
+ return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Properties properties, Zone* zone) {
+ return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+ properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+ CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
+ return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+ flags, properties);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+ MachineSignature* sig) {
+ return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8