Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 35ad16b..1b12bd9 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -17,12 +19,12 @@
// Adds Mips-specific methods for generating InstructionOperands.
-class Mips64OperandGenerator FINAL : public OperandGenerator {
+class Mips64OperandGenerator final : public OperandGenerator {
public:
explicit Mips64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
- InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
@@ -56,35 +58,6 @@
}
}
-
- bool CanBeImmediate(Node* node, InstructionCode opcode,
- FlagsContinuation* cont) {
- int64_t value;
- if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
- else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
- else
- return false;
- switch (ArchOpcodeField::decode(opcode)) {
- case kMips64Cmp32:
- switch (cont->condition()) {
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Immediate operands for unsigned 32-bit compare operations
- // should not be sign-extended.
- return is_uint15(value);
- default:
- return false;
- }
- default:
- return is_int16(value);
- }
- }
-
-
private:
bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
TRACE_UNIMPL();
@@ -123,9 +96,9 @@
InstructionCode opcode, FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[4];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand* outputs[2];
+ InstructionOperand outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
@@ -141,14 +114,13 @@
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, input_count);
- DCHECK_NE(0, output_count);
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -160,35 +132,34 @@
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMips64Lwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMips64Ldc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -197,7 +168,7 @@
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
- InstructionOperand* addr_reg = g.TempRegister();
+ InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
@@ -213,67 +184,180 @@
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMips64Swc1;
- break;
- case kRepFloat64:
- opcode = kMips64Sdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMips64Sb;
- break;
- case kRepWord16:
- opcode = kMips64Sh;
- break;
- case kRepWord32:
- opcode = kMips64Sw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kMips64Sd;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  // TODO(mips): This could likely be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand* addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
- g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMips64Swc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMips64Sdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Sb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Sh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Sw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Sd;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitWord32And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
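+      // e.g. And(Shr(x, 8), 0xff) selects Ext(x, 8, 8).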
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+      // Ext cannot extract bits past the register size. However, since the
+      // shift already filled the vacated upper bits with zeros, we can still
+      // use Ext with a smaller mask; the remaining bits are zero anyway.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+      // Use Ins to insert zeros for the (x >> K) << K => x & ~(2^K - 1)
+      // reduction, avoiding a constant load for the inverted mask.
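+      // e.g. And(x, 0xffffff00) selects Ins(x, 0, 8) (clear low 8 bits).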
+ Emit(kMips64Ins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
void InstructionSelector::VisitWord64And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
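+      // e.g. And(Shr(x, 16), 0xffff) selects Dext(x, 16, 16).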
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+
+      // Dext cannot extract bits past the register size. However, since the
+      // shift already filled the vacated upper bits with zeros, we can still
+      // use Dext with a smaller mask; the remaining bits are zero anyway.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation64(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+      // Use Dins to insert zeros for the (x >> K) << K => x & ~(2^K - 1)
+      // reduction, avoiding a constant load for the inverted mask. Dins
+      // cannot insert bits past the word size, so only shifts smaller than
+      // 32 are handled here.
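+      // e.g. And(x, 0xffffffffffffff00) selects Dins(x, 0, 8).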
+ Emit(kMips64Dins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
@@ -289,21 +373,105 @@
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
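+      // Fold Xor(Or(a, b), -1) into a single Nor(a, b).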
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+    // Use Nor for bit negation; this avoids loading a constant for xori.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
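+      // Fold Xor(Or(a, b), -1) into a single Nor(a, b).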
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+    // Use Nor for bit negation; this avoids loading a constant for xori.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous, and the shift immediate non-zero.
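+    // e.g. Shl(And(x, 0xffff), 16) reduces to a plain Shl(x, 16); the
+    // masked-off bits are shifted out anyway.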
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Shl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Shl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+      // Select Ext for Shr(And(x, mask), imm) where the masked value is
+      // shifted down into the least-significant bits.
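+      // e.g. Shr(And(x, 0xff00), 8) selects Ext(x, 8, 8).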
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Shr, node);
}
@@ -314,11 +482,67 @@
void InstructionSelector::VisitWord64Shl(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63)) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
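+    // e.g. Shl(ChangeInt32ToInt64(x), 32) becomes Dshl(x, 32).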
+ Emit(kMips64Dshl, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint64_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Dshl, node);
}
void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x3f;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+      // Select Dext for Shr(And(x, mask), imm) where the masked value is
+      // shifted down into the least-significant bits.
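+      // e.g. Shr(And(x, 0xff0000), 16) selects Dext(x, 16, 8).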
+ uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Dshr, node);
}
@@ -333,11 +557,33 @@
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kMips64Clz, node);
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kMips64Dclz, node);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
// TODO(plind): Consider multiply & add optimization from arm port.
@@ -374,7 +620,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -383,7 +629,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -392,25 +638,32 @@
return;
}
}
- Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()));
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Dmul high.
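+        // With the low words zero (smi tags), the upper 64 bits of the
+        // 128-bit product of the tagged values equal the untagged product.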
+ Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kMips64Mul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64MulHigh, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ VisitRRR(this, kMips64MulHigh, node);
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- Mips64OperandGenerator g(this);
- InstructionOperand* const dmul_operand = g.TempRegister();
- Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
- Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
- g.TempImmediate(32));
+ VisitRRR(this, kMips64MulHighU, node);
}
@@ -419,7 +672,7 @@
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
- int64_t value = m.right().Value();
+ int32_t value = static_cast<int32_t>(m.right().Value());
if (base::bits::IsPowerOfTwo32(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
@@ -427,7 +680,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
@@ -436,7 +689,7 @@
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
- InstructionOperand* temp = g.TempRegister();
+ InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value + 1)));
@@ -453,7 +706,22 @@
void InstructionSelector::VisitInt32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Ddiv.
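+        // The scaling cancels exactly: (a * 2^32) / (b * 2^32) == a / b, so
+        // the still-tagged values can be divided directly.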
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -461,7 +729,7 @@
void InstructionSelector::VisitUint32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -469,6 +737,21 @@
void InstructionSelector::VisitInt32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Dmod.
+ Emit(kMips64Dmod, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -485,7 +768,7 @@
void InstructionSelector::VisitInt64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -493,7 +776,7 @@
void InstructionSelector::VisitUint64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -515,35 +798,151 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64CvtDS, node);
}
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64CvtDW, node);
}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64CvtDUw, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64CvtDUw, node);
}
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64TruncWD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+  // Match ChangeFloat64ToInt32(Float64Round##OP) to the corresponding
+  // instruction, which rounds and converts to integer in a single step.
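+  // e.g. ChangeFloat64ToInt32(Float64RoundDown(x)) selects kMips64FloorWD.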
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMips64FloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMips64CeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMips64RoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMips64TruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float32Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMips64FloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMips64CeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMips64RoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kMips64TruncWD, node);
}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kMips64TruncUwD, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64TruncUwD, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}
@@ -563,6 +962,24 @@
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+          // A Sar by 32..63 already yields a sign-extended 32-bit value, so
+          // no explicit truncation is needed; combine the sequence.
+ Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
@@ -570,7 +987,75 @@
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Node* value = node->InputAt(0);
+  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to the corresponding
+  // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMips64CvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kMips64CvtSD, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+ switch (TruncationModeOf(node->op())) {
+ case TruncationMode::kJavaScript:
+ return VisitRR(this, kArchTruncateDoubleToI, node);
+ case TruncationMode::kRoundToZero:
+ return VisitRR(this, kMips64TruncWD, node);
+ }
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSL, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDL, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSUl, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDUl, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64BitcastDL, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64BitcastLD, node);
+}
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kMips64AddS, node);
}
@@ -579,16 +1064,45 @@
}
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kMips64SubS, node);
+}
+
+
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
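+  // Fold -0.0 - RoundDown(-0.0 - x) into RoundUp(x): -floor(-x) == ceil(x).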
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMips64SubD, node);
}
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kMips64MulS, node);
+}
+
+
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitRRR(this, kMips64MulD, node);
}
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kMips64DivS, node);
+}
+
+
void InstructionSelector::VisitFloat64Div(Node* node) {
VisitRRR(this, kMips64DivD, node);
}
@@ -602,19 +1116,108 @@
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+void InstructionSelector::VisitFloat32Max(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+    // Reverse the operands; the result reuses the right operand's register.
+ Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMips64Float64Floor, node);
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+    // Reverse the operands; the result reuses the right operand's register.
+ Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMips64Float64Ceil, node);
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+    // Reverse the operands; the result reuses the right operand's register.
+ Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+    // Reverse the operands; the result reuses the right operand's register.
+ Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kMips64AbsS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kMips64AbsD, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kMips64SqrtS, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kMips64SqrtD, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMips64Float32RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kMips64Float64RoundDown, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMips64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMips64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMips64Float32RoundTruncate, node);
}
@@ -628,95 +1231,95 @@
}
-void InstructionSelector::VisitCall(Node* node) {
- Mips64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMips64StackClaim | MiscField::encode(push_count), NULL);
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- Emit(kMips64StoreToStackSlot | MiscField::encode(slot), NULL,
- g.UseRegister(*input));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
- buffer.instruction_args.size(), &buffer.instruction_args.front());
-
- call_instr->MarkAsCall();
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float32RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ Mips64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (PushParameter input : (*arguments)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(descriptor->StackParameterCount());
+ if (push_count > 0) {
+ Emit(kMips64StackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedLoadWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
@@ -725,45 +1328,51 @@
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kWord64:
+ opcode = kCheckedStoreWord64;
+ break;
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
- InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
+ InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
+ ? g.UseImmediate(offset)
+ : g.UseRegister(offset);
- InstructionOperand* length_operand =
- (!g.CanBeImmediate(offset, opcode)) ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
+ InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
+ ? g.CanBeImmediate(length, opcode)
+ ? g.UseImmediate(length)
+ : g.UseRegister(length)
+ : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
- length_operand, g.UseRegister(value), g.UseRegister(buffer));
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ offset_operand, length_operand, g.UseRegister(value),
+ g.UseRegister(buffer));
}
@@ -771,13 +1380,13 @@
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand* left, InstructionOperand* right,
+ InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
- selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -785,14 +1394,33 @@
}
-// Shared routine for multiple float compare operations.
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}
@@ -805,13 +1433,53 @@
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, opcode, cont)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
- } else if (g.CanBeImmediate(left, opcode, cont)) {
+ if (g.CanBeImmediate(right, opcode)) {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
@@ -821,7 +1489,7 @@
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
+ VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
@@ -833,15 +1501,14 @@
} // namespace
-void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
- Node* value, FlagsContinuation* cont) {
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- InstructionOperand* const value_operand = g.UseRegister(value);
+ InstructionCode opcode = cont->Encode(kMips64Cmp);
+ InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
- selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -852,13 +1519,7 @@
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- // Initially set comparison against 0 to be 64-bit variant for branches that
- // cannot combine.
- InstructionCode opcode = kMips64Cmp;
while (selector->CanCover(user, value)) {
- if (user->opcode() == IrOpcode::kWord32Equal) {
- opcode = kMips64Cmp32;
- }
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
// Combine with comparisons against 0 by simply inverting the
@@ -868,7 +1529,6 @@
user = value;
value = m.left().node();
cont->Negate();
- opcode = kMips64Cmp32;
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -908,27 +1568,39 @@
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(selector, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+ cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
// <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
+ if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+        // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
- if (result == NULL || selector->IsDefined(result)) {
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -936,6 +1608,12 @@
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DaddOvf, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DsubOvf, cont);
default:
break;
}
@@ -943,7 +1621,6 @@
}
break;
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kMips64Tst, cont, true);
default:
@@ -953,7 +1630,7 @@
}
// Continuation could not be combined with a compare, emit compare against 0.
- EmitWordCompareZero(selector, opcode, value, cont);
+ EmitWordCompareZero(selector, value, cont);
}
@@ -964,6 +1641,34 @@
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
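+  // A table dispatches in constant time but costs space proportional to the
+  // value range; a compare chain is smaller but linear in the case count.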
+ size_t table_space_cost = 10 + 2 * sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kMips64Sub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
@@ -1000,7 +1705,7 @@
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dadd, &cont);
}
@@ -1010,7 +1715,7 @@
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
return VisitBinop(this, node, kMips64Dsub, &cont);
}
@@ -1019,6 +1724,26 @@
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DaddOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DaddOvf, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DsubOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DsubOvf, &cont);
+}
+
+
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int64BinopMatcher m(node);
@@ -1048,30 +1773,94 @@
}
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+
void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
+ FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
+ FlagsContinuation cont(kUnsignedLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractHighWord32, node);
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ return MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler