Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/test/unittests/compiler/register-allocator-unittest.cc b/test/unittests/compiler/register-allocator-unittest.cc
index 12dedbd..c5ff90f 100644
--- a/test/unittests/compiler/register-allocator-unittest.cc
+++ b/test/unittests/compiler/register-allocator-unittest.cc
@@ -9,6 +9,77 @@
 namespace internal {
 namespace compiler {
+
+namespace {
+
+// We can't just use the size of the moves collection, because of
+// redundant moves which need to be discounted.
+int GetMoveCount(const ParallelMove& moves) {
+  int move_count = 0;
+  for (auto move : moves) {
+    if (move->IsEliminated() || move->IsRedundant()) continue;
+    ++move_count;
+  }
+  return move_count;
+}
+
+
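+// Returns true if |op| and |test_op| agree on being a register vs. a stack
+// slot.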
+bool AreOperandsOfSameType(
+    const AllocatedOperand& op,
+    const InstructionSequenceTest::TestOperand& test_op) {
+  bool test_op_is_reg =
+      (test_op.type_ ==
+           InstructionSequenceTest::TestOperandType::kFixedRegister ||
+       test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
+
+  return (op.IsRegister() && test_op_is_reg) ||
+         (op.IsStackSlot() && !test_op_is_reg);
+}
+
+
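+// Returns true if |op| matches |test_op|'s location kind and, unless the test
+// operand's value is kNoValue (wildcard), its register code or slot index.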
+bool AllocatedOperandMatches(
+    const AllocatedOperand& op,
+    const InstructionSequenceTest::TestOperand& test_op) {
+  return AreOperandsOfSameType(op, test_op) &&
+         ((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
+              test_op.value_ ||
+          test_op.value_ == InstructionSequenceTest::kNoValue);
+}
+
+
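+// Counts the non-redundant moves in the parallel move at |gap_pos| of the
+// instruction at |instr_index|; returns 0 if there is no parallel move there.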
+int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
+                         const InstructionSequence* sequence) {
+  const ParallelMove* moves =
+      sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+  if (moves == nullptr) return 0;
+  return GetMoveCount(*moves);
+}
+
+
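+// Returns true if the parallel move at |gap_pos| of the instruction at
+// |instr_index| contains a move matching |src| -> |dest|.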
+bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
+                           const InstructionSequence* sequence,
+                           const InstructionSequenceTest::TestOperand& src,
+                           const InstructionSequenceTest::TestOperand& dest) {
+  const ParallelMove* moves =
+      sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+  EXPECT_NE(nullptr, moves);
+
+  bool found_match = false;
+  for (auto move : *moves) {
+    if (move->IsEliminated() || move->IsRedundant()) continue;
+    if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
+        AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
+                                dest)) {
+      found_match = true;
+      break;
+    }
+  }
+  return found_match;
+}
+
+}  // namespace
+
+
 class RegisterAllocatorTest : public InstructionSequenceTest {
  public:
   void Allocate() {
@@ -42,9 +113,9 @@
     StartLoop(1);
     StartBlock();
-    auto phi = Phi(i_reg);
+    auto phi = Phi(i_reg, 2);
     auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
-    Extend(phi, ipp);
+    SetInput(phi, 1, ipp);
     EndBlock(Jump(0));
     EndLoop();
@@ -206,14 +277,14 @@
     StartBlock();
     for (size_t i = 0; i < arraysize(parameters); ++i) {
-      phis[i] = Phi(parameters[i]);
+      phis[i] = Phi(parameters[i], 2);
     }
     // Perform some computations.
     // something like phi[i] += const
     for (size_t i = 0; i < arraysize(parameters); ++i) {
       auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
-      Extend(phis[i], result);
+      SetInput(phis[i], 1, result);
     }
     EndBlock(Branch(Reg(DefineConstant()), 1, 2));
@@ -301,6 +372,31 @@
 }
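+// Forces live ranges to be split in the second half of an instruction when
+// all registers are occupied.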
+TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
+  const int kNumRegs = 6;
+  SetNumRegs(kNumRegs, kNumRegs);
+
+  StartBlock();
+
+  // Stack parameters/spilled values.
+  auto p_0 = Define(Slot(-1));
+  auto p_1 = Define(Slot(-2));
+
+  // Fill registers.
+  VReg values[kNumRegs];
+  for (size_t i = 0; i < arraysize(values); ++i) {
+    values[i] = Define(Reg(static_cast<int>(i)));
+  }
+
+  // values[0] and [1] will be split in the second half of this instruction.
+  EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
+  EmitI(Reg(values[0]), Reg(values[1]));
+  EndBlock(Last());
+
+  Allocate();
+}
+
+
 TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
   // Outer diamond.
   StartBlock();
@@ -432,6 +528,260 @@
 }
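+// Regression test: the constant c stays live through a loop and is forced to
+// split within to_spill's definition (hence the name: the constant load must
+// come before the spill).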
+TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
+  StartBlock();
+  // Fill registers.
+  VReg values[kDefaultNRegs];
+  for (size_t i = arraysize(values); i > 0; --i) {
+    values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
+  }
+  auto c = DefineConstant();
+  auto to_spill = Define(Reg());
+  EndBlock(Jump(1));
+
+  {
+    StartLoop(1);
+
+    StartBlock();
+    // Create a use for c in the second half of the previous block's last gap.
+    Phi(c);
+    for (size_t i = arraysize(values); i > 0; --i) {
+      Phi(values[i - 1]);
+    }
+    EndBlock(Jump(1));
+
+    EndLoop();
+  }
+
+  StartBlock();
+  // Force c to split within to_spill's definition.
+  EmitI(Reg(c));
+  EmitI(Reg(to_spill));
+  EndBlock(Last());
+
+  Allocate();
+}
+
+
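+// Diamond where the first arm contains a call; a value defined before the
+// branch is used after the merge.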
+TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
+  StartBlock();
+  auto x = EmitOI(Reg(0));
+  EndBlock(Branch(Reg(x), 1, 2));
+
+  StartBlock();
+  EmitCall(Slot(-1));
+  auto occupy = EmitOI(Reg(0));
+  EndBlock(Jump(2));
+
+  StartBlock();
+  EndBlock(FallThrough());
+
+  StartBlock();
+  Use(occupy);
+  Return(Reg(x));
+  EndBlock();
+  Allocate();
+}
+
+
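+// Same as above, but with the call in the second arm of the diamond.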
+TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
+  StartBlock();
+  auto x = EmitOI(Reg(0));
+  EndBlock(Branch(Reg(x), 1, 2));
+
+  StartBlock();
+  EndBlock(Jump(2));
+
+  StartBlock();
+  EmitCall(Slot(-1));
+  auto occupy = EmitOI(Reg(0));
+  EndBlock(FallThrough());
+
+  StartBlock();
+  Use(occupy);
+  Return(Reg(x));
+  EndBlock();
+  Allocate();
+}
+
+
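+// A value's only slot use is a call inside a deferred block; checks where the
+// allocator places the spill move, depending on FLAG_turbo_preprocess_ranges.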
+TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
+  StartBlock();  // B0
+  auto var = EmitOI(Reg(0));
+  EndBlock(Branch(Reg(var), 1, 2));
+
+  StartBlock();  // B1
+  EndBlock(Jump(2));
+
+  StartBlock(true);  // B2
+  EmitCall(Slot(-1), Slot(var));
+  EndBlock();
+
+  StartBlock();  // B3
+  EmitNop();
+  EndBlock();
+
+  StartBlock();  // B4
+  Return(Reg(var, 0));
+  EndBlock();
+
+  Allocate();
+
+  const int var_def_index = 1;
+  const int call_index = 3;
+  int expect_no_moves =
+      FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
+  int expect_spill_move =
+      FLAG_turbo_preprocess_ranges ? call_index : var_def_index;
+
+  // We should have no parallel moves at the "expect_no_moves" position.
+  EXPECT_EQ(
+      0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));
+
+  // The spill should be performed at the position expect_spill_move.
+  EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
+                                    sequence(), Reg(0), Slot(0)));
+}
+
+
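+// Two deferred blocks, each containing a call; var3 is live across both and
+// should only be spilled and reloaded inside the deferred code.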
+TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
+  if (!FLAG_turbo_preprocess_ranges) return;
+
+  StartBlock();  // B0
+  auto var1 = EmitOI(Reg(0));
+  auto var2 = EmitOI(Reg(1));
+  auto var3 = EmitOI(Reg(2));
+  EndBlock(Branch(Reg(var1, 0), 1, 2));
+
+  StartBlock(true);  // B1
+  EmitCall(Slot(-2), Slot(var1));
+  EndBlock(Jump(2));
+
+  StartBlock(true);  // B2
+  EmitCall(Slot(-1), Slot(var2));
+  EndBlock();
+
+  StartBlock();  // B3
+  EmitNop();
+  EndBlock();
+
+  StartBlock();  // B4
+  Return(Reg(var3, 2));
+  EndBlock();
+
+  const int def_of_v2 = 3;
+  const int call_in_b1 = 4;
+  const int call_in_b2 = 6;
+  const int end_of_b1 = 5;
+  const int end_of_b2 = 7;
+  const int start_of_b3 = 8;
+
+  Allocate();
+  // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
+  // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
+  // Expand the test once greedy is back online with this facility.
+  const int var3_reg = 2;
+  const int var3_slot = 2;
+
+  EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
+                                     Reg(var3_reg), Slot()));
+  EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
+                                    Reg(var3_reg), Slot(var3_slot)));
+  EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
+                                    Slot(var3_slot), Reg()));
+
+  EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
+                                    Reg(var3_reg), Slot(var3_slot)));
+  EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
+                                    Slot(var3_slot), Reg()));
+
+
+  EXPECT_EQ(0,
+            GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
+}
+
+
+namespace {
+
+enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
+
+const ParameterType kParameterTypes[] = {
+    ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
+    ParameterType::kFixedRegister};
+
+class SlotConstraintTest : public RegisterAllocatorTest,
+                           public ::testing::WithParamInterface<
+                               ::testing::tuple<ParameterType, int>> {
+ public:
+  static const int kMaxVariant = 5;
+
+ protected:
+  ParameterType parameter_type() const {
+    return ::testing::get<0>(B::GetParam());
+  }
+  int variant() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+  typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
+};
+
+}  // namespace
+
+
+#if GTEST_HAS_COMBINE
+
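+// Exercises combinations of slot and register uses of a single parameter, for
+// each parameter type in kParameterTypes and each variant below.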
+TEST_P(SlotConstraintTest, SlotConstraint) {
+  StartBlock();
+  VReg p_0;
+  switch (parameter_type()) {
+    case ParameterType::kFixedSlot:
+      p_0 = Parameter(Slot(-1));
+      break;
+    case ParameterType::kSlot:
+      p_0 = Parameter(Slot(-1));
+      break;
+    case ParameterType::kRegister:
+      p_0 = Parameter(Reg());
+      break;
+    case ParameterType::kFixedRegister:
+      p_0 = Parameter(Reg(1));
+      break;
+  }
+  switch (variant()) {
+    case 0:
+      EmitI(Slot(p_0), Reg(p_0));
+      break;
+    case 1:
+      EmitI(Slot(p_0));
+      break;
+    case 2:
+      EmitI(Reg(p_0));
+      EmitI(Slot(p_0));
+      break;
+    case 3:
+      EmitI(Slot(p_0));
+      EmitI(Reg(p_0));
+      break;
+    case 4:
+      EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  EndBlock(Last());
+
+  Allocate();
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+    RegisterAllocatorTest, SlotConstraintTest,
+    ::testing::Combine(::testing::ValuesIn(kParameterTypes),
+                       ::testing::Range(0, SlotConstraintTest::kMaxVariant)));
+
+#endif  // GTEST_HAS_COMBINE
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8