Merge V8 5.2.361.47
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 9b0d706..c437d5e 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -119,7 +119,7 @@
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -472,13 +472,24 @@
__ bind(&done); \
}
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -527,7 +538,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -564,6 +576,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -641,7 +661,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -839,6 +861,36 @@
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
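+ // The *Pair cases implement 64-bit shifts on 32-bit MIPS: the value is
+ // held in a (low, high) register pair and the shift amount is either a
+ // register or an immediate.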
+ case kMipsShlPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
+ case kMipsShrPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
+ case kMipsSarPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
case kMipsExt:
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
@@ -869,7 +921,11 @@
__ li(i.OutputRegister(), i.InputOperand(0));
}
break;
-
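+ // Lsa computes rd = rt + (rs << sa): a native lsa instruction on r6,
+ // emulated with sll + addu on earlier cores.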
+ case kMipsLsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
case kMipsCmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -923,6 +979,14 @@
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
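+ // AddPair/SubPair perform 64-bit add/subtract on (low, high) register
+ // pairs, propagating the carry/borrow from the low word into the high.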
+ case kMipsAddPair:
+ __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+ break;
+ case kMipsSubPair:
+ __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+ break;
case kMipsMulPair: {
__ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
i.InputRegister(2));
@@ -1212,7 +1276,7 @@
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsPush:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1227,7 +1291,7 @@
break;
}
case kMipsStoreToStackSlot: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1274,7 +1338,32 @@
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1569,18 +1658,40 @@
});
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
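+ // A null entry means the deoptimization entry table is full, so report
+ // the failure to the caller instead of emitting a bogus call.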
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
-
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ }
+
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation32(saves);
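+ // kNumCalleeSaved includes the fp register, but fp is saved separately
+ // in TurboFan frames, hence the + 1.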
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
@@ -1592,6 +1703,8 @@
}
}
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1602,35 +1715,24 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
// Save callee-saved FPU registers.
if (saves_fpu != 0) {
__ MultiPushFPU(saves_fpu);
- int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
- frame()->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
- frame()->AllocateSavedCalleeRegisterSlots(count);
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
}
}
@@ -1701,7 +1803,12 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ li(dst, Operand(src.ToInt32()));
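+ // Wasm memory address/size constants carry relocation info so the
+ // embedded value can be patched when the wasm memory is grown or moved.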
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ li(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
@@ -1734,7 +1841,7 @@
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ sw(at, dst);
@@ -1744,27 +1851,27 @@
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ sdc1(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
@@ -1808,23 +1915,23 @@
__ lw(temp_1, dst);
__ sw(temp_0, dst);
__ sw(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ ldc1(src, dst);
__ sdc1(temp, dst);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
@@ -1850,13 +1957,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // Unused on 32-bit ARM. Still exists on 64-bit arm.
- // TODO(plind): Unclear when this is called now. Understand, fix if needed.
- __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index d85c2a7..5c36525 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -30,9 +30,13 @@
V(MipsClz) \
V(MipsCtz) \
V(MipsPopcnt) \
+ V(MipsLsa) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsShlPair) \
+ V(MipsShrPair) \
+ V(MipsSarPair) \
V(MipsExt) \
V(MipsIns) \
V(MipsRor) \
@@ -59,6 +63,8 @@
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsAddPair) \
+ V(MipsSubPair) \
V(MipsMulPair) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index f86ffe7..cccb39a 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -395,27 +395,71 @@
VisitRRO(this, kMipsSar, node);
}
-void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
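+// Shared routine for the 64-bit pair binops: inputs are the (low, high)
+// halves of both operands; outputs are the (low, high) halves of the
+// result, with the high half exposed as projection 1.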
+static void VisitInt32PairBinop(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
-void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32PairMul(Node* node) {
- MipsOperandGenerator g(this);
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)),
g.UseUniqueRegister(node->InputAt(2)),
g.UseUniqueRegister(node->InputAt(3))};
+
InstructionOperand outputs[] = {
g.DefineAsRegister(node),
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- Emit(kMipsMulPair, 2, outputs, 4, inputs);
+ selector->Emit(opcode, 2, outputs, 4, inputs);
}
-void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ VisitInt32PairBinop(this, kMipsAddPair, node);
+}
-void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ VisitInt32PairBinop(this, kMipsSubPair, node);
+}
-void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ VisitInt32PairBinop(this, kMipsMulPair, node);
+}
+
+// Shared routine for multiple shift operations.
+static void VisitWord32PairShift(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitWord32PairShift(this, kMipsShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitWord32PairShift(this, kMipsShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitWord32PairShift(this, kMipsSarPair, node);
+}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMipsRor, node);
@@ -444,8 +488,32 @@
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
- // TODO(plind): Consider multiply & add optimization from arm port.
+ // Select Lsa for (left + (left_of_right << imm)).
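+ // e.g. x + (y << 2) collapses into a single lsa when the add covers
+ // the shift node.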
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
VisitBinop(this, node, kMipsAdd);
}
@@ -467,12 +535,9 @@
return;
}
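+ // value == 2^k + 1: x * value == x + (x << k), i.e. a single lsa.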
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -654,17 +719,13 @@
VisitRR(this, kMipsCvtSD, node);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kMipsTruncWD, node);
- }
- UNREACHABLE();
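+// JavaScript truncation semantics go through the generic arch opcode; the
+// round-to-zero variant maps directly onto MIPS trunc.w.d.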
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kMipsTruncWD, node);
+}
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMipsFloat64ExtractLowWord32, node);
@@ -693,6 +754,9 @@
VisitRRR(this, kMipsSubS, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitRRR(this, kMipsSubS, node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
@@ -712,6 +776,9 @@
VisitRRR(this, kMipsSubD, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRRR(this, kMipsSubD, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
@@ -999,7 +1066,6 @@
namespace {
-
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1388,6 +1454,73 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
// static
MachineOperatorBuilder::Flags