Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index a0b5022..2c9415e 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -149,7 +149,7 @@
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -218,7 +218,8 @@
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
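+ // Rationale (inferred, not stated in the patch): with no frame, lr has not
+ // been saved on entry, so the out-of-line record-write code must preserve
+ // it across its call.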
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
@@ -388,12 +389,25 @@
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
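+// Barrier placement sketch (inferred from the ARMv7 memory model, not stated
+// in this patch): a load followed by dmb(ISH) acts as a load-acquire, and a
+// store bracketed by dmb(ISH) on both sides is a sequentially consistent
+// store, matching what C++ compilers typically emit for seq_cst atomics:
+// int32_t v = cell.load(); // ldr; dmb ish
+// cell.store(v); // dmb ish; str; dmb ish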
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(2), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ dmb(ISH); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -445,7 +459,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
ArmOperandConverter i(this, instr);
__ MaybeCheckConstPool();
@@ -488,6 +503,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
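+ // Tail call through a register: the activation record is torn down
+ // first, and the SP delta is cleared because control never returns here.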
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -571,7 +594,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -856,7 +881,7 @@
}
break;
case kArmVcmpF32:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
i.InputFloat32Register(1));
} else {
@@ -907,7 +932,7 @@
__ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVcmpF64:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat64Register(0),
i.InputFloat64Register(1));
} else {
@@ -1146,8 +1171,48 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmFloat32Max: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (b < a) ? a : b
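+ // On an unordered compare (either input NaN), gt is false and b is
+ // selected; the same holds for the three cases below (inferred from the
+ // vsel condition encoding).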
+ SwVfpRegister a = i.InputFloat32Register(0);
+ SwVfpRegister b = i.InputFloat32Register(1);
+ SwVfpRegister result = i.OutputFloat32Register(0);
+ __ VFPCompareAndSetFlags(a, b);
+ __ vsel(gt, result, a, b);
+ break;
+ }
+ case kArmFloat32Min: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (a < b) ? a : b
+ SwVfpRegister a = i.InputFloat32Register(0);
+ SwVfpRegister b = i.InputFloat32Register(1);
+ SwVfpRegister result = i.OutputFloat32Register(0);
+ __ VFPCompareAndSetFlags(b, a);
+ __ vsel(gt, result, a, b);
+ break;
+ }
+ case kArmFloat64Max: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (b < a) ? a : b
+ DwVfpRegister a = i.InputFloat64Register(0);
+ DwVfpRegister b = i.InputFloat64Register(1);
+ DwVfpRegister result = i.OutputFloat64Register(0);
+ __ VFPCompareAndSetFlags(a, b);
+ __ vsel(gt, result, a, b);
+ break;
+ }
+ case kArmFloat64Min: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (a < b) ? a : b
+ DwVfpRegister a = i.InputFloat64Register(0);
+ DwVfpRegister b = i.InputFloat64Register(1);
+ DwVfpRegister result = i.OutputFloat64Register(0);
+ __ VFPCompareAndSetFlags(b, a);
+ __ vsel(gt, result, a, b);
+ break;
+ }
case kArmPush:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vpush(i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1202,7 +1267,34 @@
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
+ break;
+
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1263,20 +1355,47 @@
}
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
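+ // A null entry means the deoptimization entry table is exhausted; report
+ // it via CodeGenResult so compilation can bail out instead of crashing.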
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
+ return kSuccess;
}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ }
+
+ if (saves_fp != 0) {
+ // Save callee-saved FP registers.
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
+ (kDoubleSize / kPointerSize));
+ }
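+ // FinishFrame only reserves the frame slots; the vstm/stm instructions
+ // that actually save the registers are still emitted in
+ // AssembleConstructFrame below.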
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ frame->AllocateSavedCalleeRegisterSlots(
+ base::bits::CountPopulation32(saves));
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1295,7 +1414,8 @@
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1306,15 +1426,12 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ sub(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
}
if (saves_fp != 0) {
@@ -1325,8 +1442,6 @@
DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
- frame()->AllocateSavedCalleeRegisterSlots((last - first + 1) *
- (kDoubleSize / kPointerSize));
}
const RegList saves = FLAG_enable_embedded_constant_pool
? (descriptor->CalleeSavedRegisters() & ~pp.bit())
@@ -1334,8 +1449,6 @@
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
- frame()->AllocateSavedCalleeRegisterSlots(
- base::bits::CountPopulation32(saves));
}
}
@@ -1408,7 +1521,12 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
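+ // Tagging the constant with its reloc mode lets the embedded wasm memory
+ // address or size be patched when the backing store moves (inferred from
+ // the WASM_MEMORY_* reloc kinds).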
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
UNREACHABLE();
@@ -1443,7 +1561,7 @@
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
@@ -1453,27 +1571,27 @@
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DwVfpRegister dst = destination->IsDoubleRegister()
+ DwVfpRegister dst = destination->IsFPRegister()
? g.ToFloat64Register(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
DwVfpRegister temp = kScratchDoubleReg;
@@ -1517,23 +1635,23 @@
__ vldr(temp_1, dst);
__ str(temp_0, dst);
__ vstr(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DwVfpRegister temp = kScratchDoubleReg;
DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ vldr(src, dst);
__ vstr(temp, dst);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
DwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
@@ -1559,11 +1677,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // On 32-bit ARM we do not insert nops for inlined Smi code.
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 5e6f5c9..fc371e0 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -101,6 +101,10 @@
V(ArmVstrF32) \
V(ArmVldrF64) \
V(ArmVstrF64) \
+ V(ArmFloat32Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Max) \
+ V(ArmFloat64Min) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index 466765e..ec28b72 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -99,6 +99,10 @@
case kArmVmovHighU32F64:
case kArmVmovHighF64U32:
case kArmVmovF64U32U32:
+ case kArmFloat64Max:
+ case kArmFloat64Min:
+ case kArmFloat32Max:
+ case kArmFloat32Min:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 76d9e3c..b2b1a70 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1142,15 +1142,12 @@
VisitRR(this, kArmVcvtF32F64, node);
}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kArmVcvtS32F64, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kArmVcvtS32F64, node);
}
@@ -1208,6 +1205,35 @@
VisitRRR(this, kArmVaddF64, node);
}
+namespace {
+void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
+ ArmOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
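+ // Fold a - (b * c) into a single vmls when CanCover proves the multiply
+ // can be absorbed into this subtraction.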
+ if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
+ Float32BinopMatcher mright(m.right().node());
+ selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRR(selector, kArmVsubF32, node);
+}
+
+void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
+ ArmOperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
+ Float64BinopMatcher mright(m.right().node());
+ selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRR(selector, kArmVsubF64, node);
+}
+} // namespace
void InstructionSelector::VisitFloat32Sub(Node* node) {
ArmOperandGenerator g(this);
@@ -1217,16 +1243,12 @@
g.UseRegister(m.right().node()));
return;
}
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- Float32BinopMatcher mright(m.right().node());
- Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- VisitRRR(this, kArmVsubF32, node);
+ VisitFloat32SubHelper(this, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitFloat32SubHelper(this, node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
ArmOperandGenerator g(this);
@@ -1248,16 +1270,12 @@
g.UseRegister(m.right().node()));
return;
}
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- Float64BinopMatcher mright(m.right().node());
- Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- VisitRRR(this, kArmVsubF64, node);
+ VisitFloat64SubHelper(this, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitFloat64SubHelper(this, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kArmVmulF32, node);
@@ -1285,18 +1303,25 @@
g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
-
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat32Max, node);
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat64Max, node);
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat32Min, node);
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat64Min, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kArmVabsF32, node);
@@ -1807,6 +1832,61 @@
g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
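+ // The store has no outputs; unique registers keep base, index and value
+ // from aliasing (inferred rationale for UseUniqueRegister).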
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
@@ -1826,7 +1906,11 @@
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kFloat64RoundTiesEven;
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max;
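+ // Float min/max are only advertised under ARMv8, matching the vsel-based
+ // lowering in the code generator.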
}
return flags;
}