Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/compiler/mips/OWNERS b/src/compiler/mips/OWNERS
index 5508ba6..89455a4 100644
--- a/src/compiler/mips/OWNERS
+++ b/src/compiler/mips/OWNERS
@@ -3,3 +3,4 @@
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index dd92837..75e4b9e 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -35,16 +35,16 @@
// Adds Mips-specific methods to convert InstructionOperands.
-class MipsOperandConverter FINAL : public InstructionOperandConverter {
+class MipsOperandConverter final : public InstructionOperandConverter {
public:
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- FloatRegister OutputSingleRegister(int index = 0) {
+ FloatRegister OutputSingleRegister(size_t index = 0) {
return ToSingleRegister(instr_->OutputAt(index));
}
- FloatRegister InputSingleRegister(int index) {
+ FloatRegister InputSingleRegister(size_t index) {
return ToSingleRegister(instr_->InputAt(index));
}
@@ -54,7 +54,19 @@
return ToDoubleRegister(op);
}
- Operand InputImmediate(int index) {
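+ // An immediate FP operand here always encodes the value 0.0 (the caller
+ // materializes kDoubleRegZero accordingly); return the canonical FP zero
+ // register instead of an allocated one in that case.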
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -79,7 +91,7 @@
return Operand(zero_reg);
}
- Operand InputOperand(int index) {
+ Operand InputOperand(size_t index) {
InstructionOperand* op = instr_->InputAt(index);
if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -87,8 +99,8 @@
return InputImmediate(index);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
@@ -103,33 +115,31 @@
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
- DCHECK(!op->IsRegister());
- DCHECK(!op->IsDoubleRegister());
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- // The linkage computes where all spill slots are located.
- FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
-static inline bool HasRegisterInput(Instruction* instr, int index) {
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
namespace {
-class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+class OutOfLineLoadSingle final : public OutOfLineCode {
public:
OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<float>::quiet_NaN());
}
@@ -138,12 +148,12 @@
};
-class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
public:
OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
__ Move(result_, std::numeric_limits<double>::quiet_NaN());
}
@@ -152,12 +162,12 @@
};
-class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL { __ mov(result_, zero_reg); }
+ void Generate() final { __ mov(result_, zero_reg); }
private:
Register const result_;
@@ -169,7 +179,7 @@
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() FINAL {
+ void Generate() final {
// Handle rounding to zero case where sign has to be preserved.
// High bits of double input already in kScratchReg.
__ srl(at, kScratchReg, 31);
@@ -182,25 +192,145 @@
};
-class OutOfLineTruncate FINAL : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
+
+ private:
+ DoubleRegister const result_;
};
-class OutOfLineFloor FINAL : public OutOfLineRound {
+class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
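+ // Smis never need a write barrier; this runtime check is only emitted
+ // when the value is not statically known to be a pointer or map.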
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore ra properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Addu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
};
-class OutOfLineCeil FINAL : public OutOfLineRound {
- public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kNoCondition;
+}
+
+
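+// Returns the FPU condition to test for |condition| and sets |predicate| to
+// true when a set condition flag means |condition| holds, or to false when
+// the flag must be negated (e.g. kNotEqual is tested as !EQ).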
+FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ predicate = true;
+ return EQ;
+ case kNotEqual:
+ predicate = false;
+ return EQ;
+ case kUnsignedLessThan:
+ predicate = true;
+ return OLT;
+ case kUnsignedGreaterThanOrEqual:
+ predicate = false;
+ return ULT;
+ case kUnsignedLessThanOrEqual:
+ predicate = true;
+ return OLE;
+ case kUnsignedGreaterThan:
+ predicate = false;
+ return ULE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ predicate = true;
+ break;
+ default:
+ predicate = true;
+ break;
+ }
+ UNREACHABLE();
+ return kNoFPUCondition;
+}
} // namespace
@@ -212,8 +342,8 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -230,8 +360,8 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -248,8 +378,8 @@
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.Input##width##Register(2); \
@@ -267,8 +397,8 @@
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.InputRegister(2); \
@@ -279,10 +409,15 @@
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
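+// On r6 the rounding is performed directly by rint_d under a temporarily
+// selected FCSR rounding mode. Pre-r6, the value is converted with
+// <mode>_l_d, and a zero integer result is sent to the out-of-line path so
+// that the sign of a negative zero result is preserved.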
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -290,14 +425,63 @@
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
__ or_(at, at, kScratchReg2); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
+
+
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ }
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addiu(sp, sp, sp_slot_delta * kPointerSize);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
// Assembles an instruction after register allocation, producing machine code.
@@ -315,7 +499,21 @@
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallCodeObject: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(at);
+ }
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -329,37 +527,115 @@
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ }
+
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Jump(at);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Addu(at, object, index);
+ __ sw(value, MemOperand(at));
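+ // Call the out-of-line record-write only when the object's page is
+ // flagged as possibly containing pointers the GC must track.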
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsAddOvf:
- __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsSub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsSubOvf:
- __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -372,9 +648,19 @@
break;
case kMipsDiv:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsDivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsMod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -388,9 +674,20 @@
case kMipsOr:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsNor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMipsXor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsClz:
+ __ Clz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -415,6 +712,18 @@
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
+ case kMipsExt:
+ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMipsIns:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
case kMipsRor:
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -434,6 +743,56 @@
}
break;
+ case kMipsCmpS:
+ // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+ break;
+ case kMipsAddS:
+ // TODO(plind): add special case: combine mult & add.
+ __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsSubS:
+ __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMulS:
+ // TODO(plind): add special case: right op is -1.0, see arm port.
+ __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsDivS:
+ __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsModS: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result into the output register.
+ __ MovFromFloatResult(i.OutputSingleRegister());
+ break;
+ }
+ case kMipsAbsS:
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kMipsSqrtS: {
+ __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kMipsMaxS:
+ __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMinS:
+ __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -468,20 +827,107 @@
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMipsFloat64Floor: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ case kMipsAbsD:
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsSqrtD: {
+ __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kMipsFloat64Ceil: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ case kMipsMaxD:
+ __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsMinD:
+ __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kMipsFloat64RoundDown: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMipsFloat32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
break;
}
case kMipsFloat64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
break;
}
- case kMipsSqrtD: {
- __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kMipsFloat32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
+ break;
+ }
+ case kMipsFloat64RoundUp: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMipsFloat32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMipsFloat64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMipsFloat32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ break;
+ }
+ case kMipsFloat64Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat64Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
break;
}
case kMipsCvtSD: {
@@ -498,11 +944,35 @@
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMipsCvtSW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMipsCvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
break;
}
+ case kMipsFloorWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
@@ -510,12 +980,48 @@
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMipsFloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsTruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMipsFloat64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsFloat64ExtractHighWord32:
+ __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsFloat64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kMipsFloat64InsertHighWord32:
+ __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
// ... more basic instructions ...
case kMipsLbu:
@@ -547,7 +1053,7 @@
break;
}
case kMipsSwc1: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
@@ -559,28 +1065,26 @@
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsPush:
- __ Push(i.InputRegister(0));
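+ // A double occupies two pointer-sized slots; the SP delta is tracked in
+ // slots so that later frame-relative accesses remain correct.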
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
break;
case kMipsStackClaim: {
- int words = MiscField::decode(instr->opcode());
- __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
+ __ Subu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMipsStoreToStackSlot: {
- int slot = MiscField::decode(instr->opcode());
- __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
- break;
- }
- case kMipsStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ addu(index, object, index);
- __ sw(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
break;
}
case kCheckedLoadInt8:
@@ -619,8 +1123,12 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
-}
+} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
@@ -628,137 +1136,113 @@
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
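+// Translates a FlagsCondition into a Condition usable by the FP branch
+// macros below; returns false when there is no direct mapping.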
+static bool convertCondition(FlagsCondition condition, Condition& cc) {
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ return true;
+ case kNotEqual:
+ cc = ne;
+ return true;
+ case kUnsignedLessThan:
+ cc = lt;
+ return true;
+ case kUnsignedGreaterThanOrEqual:
+ cc = uge;
+ return true;
+ case kUnsignedLessThanOrEqual:
+ cc = le;
+ return true;
+ case kUnsignedGreaterThan:
+ cc = ugt;
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
MipsOperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips pseudo-instructions, which are handled here by branch
// instructions that do the actual comparison. Essential that the input
// registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
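+ // For example, a Word32 comparison is selected as the kMipsCmp pseudo-op,
+ // and the actual compare is folded into the Branch emitted below.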
if (instr->arch_opcode() == kMipsTst) {
- switch (branch->condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMipsTst, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
-
- } else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
+ } else if (instr->arch_opcode() == kMipsAddOvf) {
switch (branch->condition) {
case kOverflow:
- cc = lt;
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
break;
case kNotOverflow:
- cc = ge;
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
break;
default:
UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
break;
}
- __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
-
+ } else if (instr->arch_opcode() == kMipsSubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsSubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMipsCmp) {
- switch (branch->condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, branch->condition);
- break;
- }
+ cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
- } else if (instr->arch_opcode() == kMipsCmpD) {
- // TODO(dusmil) optimize unordered checks to use fewer instructions
- // even if we have to unfold BranchF macro.
- Label* nan = flabel;
- switch (branch->condition) {
- case kUnorderedEqual:
- cc = eq;
- break;
- case kUnorderedNotEqual:
- cc = ne;
- nan = tlabel;
- break;
- case kUnorderedLessThan:
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- cc = ge;
- nan = tlabel;
- break;
- case kUnorderedLessThanOrEqual:
- cc = le;
- break;
- case kUnorderedGreaterThan:
- cc = gt;
- nan = tlabel;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmpD, branch->condition);
- break;
+ } else if (instr->arch_opcode() == kMipsCmpS) {
+ if (!convertCondition(branch->condition, cc)) {
+ UNSUPPORTED_COND(kMipsCmpS, branch->condition);
}
- __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF32(tlabel, nullptr, cc, left, right);
+ } else if (instr->arch_opcode() == kMipsCmpD) {
+ if (!convertCondition(branch->condition, cc)) {
+ UNSUPPORTED_COND(kMipsCmpD, branch->condition);
+ }
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
UNIMPLEMENTED();
}
+ if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
@@ -772,224 +1256,291 @@
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label false_value;
- DCHECK_NE(0, instr->OutputCount());
+ DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips pseudo-instructions, which are checked and handled here.
- // For materializations, we use delay slot to set the result true, and
- // in the false case, where we fall thru the branch, we reset the result
- // false.
-
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
- switch (condition) {
- case kNotEqual:
- cc = ne;
- break;
- case kEqual:
- cc = eq;
- break;
- default:
- UNSUPPORTED_COND(kMipsTst, condition);
- break;
+ cc = FlagsConditionToConditionTst(condition);
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
-
+ return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
- switch (condition) {
- case kOverflow:
- cc = lt;
+ Label flabel, tlabel;
+ switch (instr->arch_opcode()) {
+ case kMipsAddOvf:
+ __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+
break;
- case kNotOverflow:
- cc = ge;
+ case kMipsSubOvf:
+ __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
+ UNREACHABLE();
break;
}
- __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
-
-
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
} else if (instr->arch_opcode() == kMipsCmp) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
- switch (condition) {
- case kEqual:
- cc = eq;
- break;
- case kNotEqual:
- cc = ne;
- break;
- case kSignedLessThan:
- cc = lt;
- break;
- case kSignedGreaterThanOrEqual:
- cc = ge;
- break;
- case kSignedLessThanOrEqual:
- cc = le;
- break;
- case kSignedGreaterThan:
- cc = gt;
- break;
- case kUnsignedLessThan:
- cc = lo;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = hs;
- break;
- case kUnsignedLessThanOrEqual:
- cc = ls;
- break;
- case kUnsignedGreaterThan:
- cc = hi;
- break;
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+ // Pass left operand if right is zero.
+ select = left;
+ } else {
+ __ Subu(kScratchReg, left, right);
+ select = kScratchReg;
+ }
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
default:
- UNSUPPORTED_COND(kMipsCmp, condition);
- break;
+ UNREACHABLE();
}
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
-
- } else if (instr->arch_opcode() == kMipsCmpD) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
- // TODO(plind): Provide NaN-testing macro-asm function without need for
- // BranchF.
- FPURegister dummy1 = f0;
- FPURegister dummy2 = f2;
- switch (condition) {
- case kUnorderedEqual:
- // TODO(plind): improve the NaN testing throughout this function.
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = eq;
- break;
- case kUnorderedNotEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ne;
- break;
- case kUnorderedLessThan:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = lt;
- break;
- case kUnorderedGreaterThanOrEqual:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = ge;
- break;
- case kUnorderedLessThanOrEqual:
- __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
- cc = le;
- break;
- case kUnorderedGreaterThan:
- __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
- __ li(result, Operand(1)); // In delay slot - returns 1 on NaN.
- cc = gt;
- break;
- default:
- UNSUPPORTED_COND(kMipsCmp, condition);
- break;
+ return;
+ } else if (instr->arch_opcode() == kMipsCmpD ||
+ instr->arch_opcode() == kMipsCmpS) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
}
- __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
- __ li(result, Operand(1)); // In delay slot - branch taken returns 1.
- // Fall-thru (branch not taken) returns 0.
-
+ bool predicate;
+ FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ __ li(result, Operand(1));
+ if (instr->arch_opcode() == kMipsCmpD) {
+ __ c(cc, D, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMipsCmpS);
+ __ c(cc, S, left, right);
+ }
+ if (predicate) {
+ __ Movf(result, zero_reg);
+ } else {
+ __ Movt(result, zero_reg);
+ }
+ } else {
+ if (instr->arch_opcode() == kMipsCmpD) {
+ __ cmp(cc, L, kDoubleCompareReg, left, right);
+ } else {
+ DCHECK(instr->arch_opcode() == kMipsCmpS);
+ __ cmp(cc, W, kDoubleCompareReg, left, right);
+ }
+ __ mfc1(result, kDoubleCompareReg);
+ __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB.
+ if (!predicate) // Toggle result for not equal.
+ __ xori(result, result, 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
TRACE_UNIMPL();
UNIMPLEMENTED();
}
- // Fallthru case is the false materialization.
- __ bind(&false_value);
- __ li(result, Operand(0));
- __ bind(&done);
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ li(at, Operand(i.InputInt32(index + 0)));
+ __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
+ }
+ __ nop(); // Branch delay slot of the last beq.
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ MipsOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+ Label here;
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ BlockTrampolinePoolFor(case_count + 6);
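+ // bal sets ra to the address of the label bound just after its delay
+ // slot; the inline table of jump targets begins four instructions later
+ // (addu, lw, jr, nop), hence the 4 * kInstrSize offset in the load below.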
+ __ bal(&here);
+ __ sll(at, input, 2); // Branch delay slot.
+ __ bind(&here);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ __ jr(at);
+ __ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ __ dd(GetLabel(i.InputRpo(index + 2)));
+ }
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ int stack_shrink_slots = frame()->GetSpillSlotCount();
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- // TODO(plind): make callee save size const, possibly DCHECK it.
- int register_save_area_size = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- register_save_area_size += kPointerSize;
- }
- frame()->SetRegisterSaveAreaSize(register_save_area_size);
- __ MultiPush(saves);
- }
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
- frame()->SetRegisterSaveAreaSize(
- StandardFrameConstants::kFixedFrameSizeFromFp);
+ } else {
+ frame()->SetElidedFrameSizeInSlots(0);
}
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+ frame_access_state()->SetFrameAccessToDefault();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
+ }
+ if (stack_shrink_slots > 0) {
+ __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ }
+
+ // Save callee-saved FPU registers.
+ if (saves_fpu != 0) {
+ __ MultiPushFPU(saves_fpu);
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame()->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ // kNumCalleeSaved includes the fp register, but the fp register
+ // is saved separately in TF.
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame()->AllocateSavedCalleeRegisterSlots(count);
}
}
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
- if (frame()->GetRegisterSaveAreaSize() > 0) {
- // Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
- if (stack_slots > 0) {
- __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
- }
- // Restore registers.
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) {
- __ MultiPop(saves);
- }
+ int pop_count = static_cast<int>(descriptor->StackParameterCount());
+
+ // Restore GP registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ if (descriptor->IsCFunctionCall()) {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ } else if (frame()->needs_frame()) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
}
- __ mov(sp, fp);
- __ Pop(ra, fp);
- __ Ret();
- } else {
- __ mov(sp, fp);
- __ Pop(ra, fp);
- int pop_count = descriptor->IsJSFunctionCall()
- ? static_cast<int>(descriptor->JSParameterCount())
- : 0;
+ }
+ if (pop_count != 0) {
__ DropAndRet(pop_count);
+ } else {
+ __ Ret();
}
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1031,9 +1582,19 @@
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
break;
- case Constant::kHeapObject:
- __ li(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ Heap::RootListIndex index;
+ int offset;
+ if (IsMaterializableFromFrame(src_object, &offset)) {
+ __ lw(dst, MemOperand(fp, offset));
+ } else if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;
@@ -1085,7 +1646,7 @@
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1134,9 +1695,9 @@
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
__ ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
@@ -1150,6 +1711,12 @@
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ // On 32-bit MIPS we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// Unused on 32-bit MIPS. Still exists on 64-bit MIPS.
// TODO(plind): Unclear when this is called now. Understand, fix if needed.
@@ -1158,24 +1725,25 @@
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block tramoline pool emission for duration of padding.
- v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block trampoline pool emission for duration of padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
- MarkLazyDeoptSite();
}
#undef __
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 3aa508f..c938177 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -25,30 +25,60 @@
V(MipsModU) \
V(MipsAnd) \
V(MipsOr) \
+ V(MipsNor) \
V(MipsXor) \
+ V(MipsClz) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsExt) \
+ V(MipsIns) \
V(MipsRor) \
V(MipsMov) \
V(MipsTst) \
V(MipsCmp) \
+ V(MipsCmpS) \
+ V(MipsAddS) \
+ V(MipsSubS) \
+ V(MipsMulS) \
+ V(MipsDivS) \
+ V(MipsModS) \
+ V(MipsAbsS) \
+ V(MipsSqrtS) \
+ V(MipsMaxS) \
+ V(MipsMinS) \
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
+ V(MipsAbsD) \
V(MipsSqrtD) \
- V(MipsFloat64Floor) \
- V(MipsFloat64Ceil) \
+ V(MipsMaxD) \
+ V(MipsMinD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
+ V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
+ V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
V(MipsTruncUwD) \
V(MipsCvtDW) \
V(MipsCvtDUw) \
+ V(MipsCvtSW) \
V(MipsLb) \
V(MipsLbu) \
V(MipsSb) \
@@ -61,10 +91,17 @@
V(MipsSwc1) \
V(MipsLdc1) \
V(MipsSdc1) \
+ V(MipsFloat64ExtractLowWord32) \
+ V(MipsFloat64ExtractHighWord32) \
+ V(MipsFloat64InsertLowWord32) \
+ V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64Max) \
+ V(MipsFloat64Min) \
+ V(MipsFloat32Max) \
+ V(MipsFloat32Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
- V(MipsStackClaim) \
- V(MipsStoreWriteBarrier)
+ V(MipsStackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/src/compiler/mips/instruction-scheduler-mips.cc b/src/compiler/mips/instruction-scheduler-mips.cc
new file mode 100644
index 0000000..af86a87
--- /dev/null
+++ b/src/compiler/mips/instruction-scheduler-mips.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 5e8e3b1..61cea76 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -17,12 +19,12 @@
// Adds Mips-specific methods for generating InstructionOperands.
-class MipsOperandGenerator FINAL : public OperandGenerator {
+class MipsOperandGenerator final : public OperandGenerator {
public:
explicit MipsOperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
@@ -42,11 +44,10 @@
return is_uint16(value);
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
case kCheckedStoreFloat64:
- return is_int16(value + kIntSize);
+ return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
+ std::numeric_limits<int16_t>::max() >= (value + kIntSize);
default:
return is_int16(value);
}
@@ -90,9 +91,9 @@
InstructionCode opcode, FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
@@ -108,14 +109,13 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -127,32 +127,32 @@
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMipsLwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMipsLdc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -161,7 +161,7 @@
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
@@ -177,59 +177,126 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMipsSwc1;
- break;
- case kRepFloat64:
- opcode = kMipsSdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMipsSb;
- break;
- case kRepWord16:
- opcode = kMipsSh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kMipsSw;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ // TODO(mips): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand* addr_reg = g.TempRegister();
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
- g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsSwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsSdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMipsSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMipsSh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsSw;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitWord32And(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+ // Ext cannot extract bits past the register size. However, since
+ // shifting the original value would have introduced some zeros, we can
+ // still use Ext with a smaller mask and the remaining bits will be
+ // zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and remove the constant load of the inverted mask.
+ Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0), g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMipsAnd);
}
@@ -240,16 +307,81 @@
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
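+ // Fold Xor(Or(a, b), -1) into a single Nor(a, b); Nor needs both of the
+ // Or inputs in registers, hence the HasValue() check below.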
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and avoid loading a constant for xori.
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMipsXor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous and the shift immediate is non-zero.
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMipsShl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMipsShl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the masked bits are
+ // shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ MipsOperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMipsShr, node);
}
@@ -264,6 +396,17 @@
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kMipsClz, node);
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
@@ -289,7 +432,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -298,7 +441,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -307,15 +450,12 @@
return;
}
}
- Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
+ VisitRRR(this, kMipsMul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsMulHigh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
+ VisitRRR(this, kMipsMulHigh, node);
}
@@ -329,7 +469,7 @@
void InstructionSelector::VisitInt32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -337,7 +477,7 @@
void InstructionSelector::VisitUint32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -359,39 +499,130 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsCvtDS, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsCvtDW, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsCvtDUw, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
MipsOperandGenerator g(this);
- Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to the corresponding
+ // instruction, which rounds and converts to integer in one step.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMipsFloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMipsCeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMipsRoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMipsTruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMipsFloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMipsCeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMipsRoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kMipsTruncWD, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsTruncUwD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMipsTruncUwD, node);
}
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
MipsOperandGenerator g(this);
- Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to the
+ // corresponding instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMipsCvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kMipsCvtSD, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kMipsTruncWD, node);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMipsFloat64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kMipsAddS, node);
}
@@ -400,16 +631,45 @@
}
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kMipsSubS, node);
+}
+
+
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ MipsOperandGenerator g(this);
+ Float64BinopMatcher m(node);
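+ // Match -0.0 - RoundDown(-0.0 - x) and rewrite it as RoundUp(x): this
+ // is ceil(x) == -floor(-x), with -0.0 - y used as an exact negation.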
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMipsSubD, node);
}
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kMipsMulS, node);
+}
+
+
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMipsMulD, node);
}
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kMipsDivS, node);
+}
+
+
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMipsDivD, node);
}
@@ -422,19 +682,108 @@
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+void InstructionSelector::VisitFloat32Max(Node* node) {
MipsOperandGenerator g(this);
- Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMipsFloat64Floor, node);
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMipsFloat64Ceil, node);
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kMipsAbsS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kMipsAbsD, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kMipsSqrtS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kMipsSqrtD, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat32RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat64RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTruncate, node);
}
@@ -448,96 +797,94 @@
}
-void InstructionSelector::VisitCall(Node* node) {
- MipsOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
- // Possibly align stack here for functions.
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMipsStackClaim | MiscField::encode(push_count), NULL);
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kMipsStoreToStackSlot | MiscField::encode(slot), NULL,
- g.UseRegister(*input));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- InstructionOperand** first_output =
- buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), first_output,
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ MipsOperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
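+ // kCArgSlotCount skips the argument slots that the MIPS O32 ABI
+ // reserves on the stack for a0-a3.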
+ int slot = kCArgSlotCount;
+ for (PushParameter input : (*arguments)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ // Possibly align stack here for functions.
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
+ if (push_count > 0) {
+ Emit(kMipsStackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(n << kPointerSizeLog2));
+ }
+ }
+ }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
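+ // The bounds check compares the offset against the length, so at most
+ // one of the two may be an immediate operand.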
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
@@ -546,45 +893,46 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
- length_operand, g.UseRegister(value), g.UseRegister(buffer));
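+ // As in VisitCheckedLoad, at most one of offset and length may be an
+ // immediate.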
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer));
}
@@ -592,29 +940,47 @@
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
- // TODO(plind): Revisit and test this path.
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
}
}
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ MipsOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
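+ // Pass a 0.0 operand as an immediate so the code generator can compare
+ // against kDoubleRegZero instead of materializing 0.0 in a register.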
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
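+ // As above, a 0.0 operand is passed as an immediate so kDoubleRegZero
+ // can be used.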
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}
@@ -628,12 +994,52 @@
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
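+ // An immediate operand for equality only helps when the result is
+ // materialized (xori + sltiu); branches (beq/bne) need both operands in
+ // registers. The slt/sltu-class conditions accept immediates directly.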
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
@@ -679,26 +1085,35 @@
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWordCompare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
- Node* const result = node->FindProjection(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
if (!result || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
@@ -724,11 +1139,10 @@
// Continuation could not be combined with a compare, emit compare against 0.
MipsOperandGenerator g(selector);
InstructionCode const opcode = cont->Encode(kMipsCmp);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -743,6 +1157,34 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ MipsOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 9 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
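+ // Prefer the jump table when its space cost plus 3x-weighted time cost
+ // is no worse than the lookup chain's; min_value must stay above
+ // INT32_MIN so the index subtraction below cannot overflow.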
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMipsSub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -778,7 +1220,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMipsAddOvf, &cont);
}
@@ -788,7 +1230,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMipsSubOvf, &cont);
}
@@ -797,33 +1239,96 @@
}
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
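+ // Double-precision rounding is only selected on r2/r6 FPUs running in
+ // 64-bit FPU (FR=1) mode; the remaining flags are unconditional.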
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
- return MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler
diff --git a/src/compiler/mips/linkage-mips.cc b/src/compiler/mips/linkage-mips.cc
deleted file mode 100644
index 2b314a2..0000000
--- a/src/compiler/mips/linkage-mips.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/linkage-impl.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct MipsLinkageHelperTraits {
- static Register ReturnValueReg() { return v0; }
- static Register ReturnValue2Reg() { return v1; }
- static Register JSCallFunctionReg() { return a1; }
- static Register ContextReg() { return cp; }
- static Register RuntimeCallFunctionReg() { return a1; }
- static Register RuntimeCallArgCountReg() { return a0; }
- static RegList CCalleeSaveRegisters() {
- return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
- s6.bit() | s7.bit();
- }
- static Register CRegisterParameter(int i) {
- static Register register_parameters[] = {a0, a1, a2, a3};
- return register_parameters[i];
- }
- static int CRegisterParametersLength() { return 4; }
-};
-
-
-typedef LinkageHelper<MipsLinkageHelperTraits> LH;
-
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
- CallDescriptor::Flags flags) {
- return LH::GetJSCallDescriptor(zone, parameter_count, flags);
-}
-
-
-CallDescriptor* Linkage::GetRuntimeCallDescriptor(
- Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, Zone* zone) {
- return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
- properties);
-}
-
-
-CallDescriptor* Linkage::GetStubCallDescriptor(
- const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
- CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
- return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
- flags, properties);
-}
-
-
-CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
- MachineSignature* sig) {
- return LH::GetSimplifiedCDescriptor(zone, sig);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8