blob: 71a726f1677450d5d2a1bd6c3a6ef4797ece2337 [file] [log] [blame]
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/pipeline.h"
6#include "test/unittests/compiler/instruction-sequence-unittest.h"
7
8namespace v8 {
9namespace internal {
10namespace compiler {
11
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000012
13namespace {
14
15// We can't just use the size of the moves collection, because of
16// redundant moves which need to be discounted.
17int GetMoveCount(const ParallelMove& moves) {
18 int move_count = 0;
19 for (auto move : moves) {
20 if (move->IsEliminated() || move->IsRedundant()) continue;
21 ++move_count;
22 }
23 return move_count;
24}
25
26
27bool AreOperandsOfSameType(
28 const AllocatedOperand& op,
29 const InstructionSequenceTest::TestOperand& test_op) {
30 bool test_op_is_reg =
31 (test_op.type_ ==
32 InstructionSequenceTest::TestOperandType::kFixedRegister ||
33 test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
34
35 return (op.IsRegister() && test_op_is_reg) ||
36 (op.IsStackSlot() && !test_op_is_reg);
37}
38
39
40bool AllocatedOperandMatches(
41 const AllocatedOperand& op,
42 const InstructionSequenceTest::TestOperand& test_op) {
43 return AreOperandsOfSameType(op, test_op) &&
44 ((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
45 test_op.value_ ||
46 test_op.value_ == InstructionSequenceTest::kNoValue);
47}
48
49
50int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
51 const InstructionSequence* sequence) {
52 const ParallelMove* moves =
53 sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
54 if (moves == nullptr) return 0;
55 return GetMoveCount(*moves);
56}
57
58
59bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
60 const InstructionSequence* sequence,
61 const InstructionSequenceTest::TestOperand& src,
62 const InstructionSequenceTest::TestOperand& dest) {
63 const ParallelMove* moves =
64 sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
65 EXPECT_NE(nullptr, moves);
66
67 bool found_match = false;
68 for (auto move : *moves) {
69 if (move->IsEliminated() || move->IsRedundant()) continue;
70 if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
71 AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
72 dest)) {
73 found_match = true;
74 break;
75 }
76 }
77 return found_match;
78}
79
80} // namespace
81
82
// Fixture for register-allocator tests. Each test uses the
// InstructionSequenceTest DSL (StartBlock/EndBlock, Phi, EmitOI, ...) to
// build a control-flow graph with instructions, then calls Allocate().
class RegisterAllocatorTest : public InstructionSequenceTest {
 public:
  // Wires up the blocks built so far and runs register allocation on the
  // resulting instruction sequence.
  void Allocate() {
    WireBlocks();
    Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
  }
};
90
91
TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
  // return p0 + p1;
  // Single block: two parameters combined by one instruction with fixed
  // register constraints on output and both inputs.
  StartBlock();
  auto a_reg = Parameter();
  auto b_reg = Parameter();
  auto c_reg = EmitOI(Reg(1), Reg(a_reg, 1), Reg(b_reg, 0));
  Return(c_reg);
  EndBlock(Last());

  Allocate();
}
103
104
TEST_F(RegisterAllocatorTest, SimpleLoop) {
  // i = K;
  // while(true) { i++ }
  StartBlock();
  auto i_reg = DefineConstant();
  EndBlock();

  {
    StartLoop(1);

    StartBlock();
    // Two-input phi: input 0 is the preheader value, input 1 (the
    // back-edge value) is filled in below once it exists.
    auto phi = Phi(i_reg, 2);
    auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
    SetInput(phi, 1, ipp);
    EndBlock(Jump(0));

    EndLoop();
  }

  Allocate();
}
126
127
TEST_F(RegisterAllocatorTest, SimpleBranch) {
  // return i ? K1 : K2
  // A branch with two return blocks; no merge block.
  StartBlock();
  auto i = DefineConstant();
  EndBlock(Branch(Reg(i), 1, 2));

  StartBlock();
  Return(DefineConstant());
  EndBlock(Last());

  StartBlock();
  Return(DefineConstant());
  EndBlock(Last());

  Allocate();
}
144
145
TEST_F(RegisterAllocatorTest, SimpleDiamond) {
  // return p0 ? p0 : p0
  // Diamond whose merge block uses a value defined before the branch, so the
  // value must stay live across both arms.
  StartBlock();
  auto param = Parameter();
  EndBlock(Branch(Reg(param), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  StartBlock();
  Return(param);
  EndBlock();

  Allocate();
}
164
165
TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
  // return i ? K1 : K2
  // Diamond where each arm defines a constant and the merge block joins them
  // with a phi that must land in a register.
  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  auto t_val = DefineConstant();
  EndBlock(Jump(2));

  StartBlock();
  auto f_val = DefineConstant();
  EndBlock(Jump(1));

  StartBlock();
  Return(Reg(Phi(t_val, f_val)));
  EndBlock();

  Allocate();
}
185
186
TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
  // Diamond with twice as many phis as registers, forcing spills at the merge.
  const int kPhis = kDefaultNRegs * 2;

  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  VReg t_vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    t_vals[i] = DefineConstant();
  }
  EndBlock(Jump(2));

  StartBlock();
  VReg f_vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    f_vals[i] = DefineConstant();
  }
  EndBlock(Jump(1));

  StartBlock();
  TestOperand merged[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    merged[i] = Use(Phi(t_vals[i], f_vals[i]));
  }
  // A call consuming all the phis keeps every one of them live at once.
  Return(EmitCall(Slot(-1), kPhis, merged));
  EndBlock();

  Allocate();
}
217
218
TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
  // Two empty diamonds in sequence; the final merge joins each value with
  // itself (Phi(v, v)), producing redundant phis the allocator must handle.
  const int kPhis = kDefaultNRegs * 2;

  // First diamond.
  StartBlock();
  VReg vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    vals[i] = Parameter(Slot(-1 - i));
  }
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  // Second diamond.
  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  StartBlock();
  TestOperand merged[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    // Both phi inputs are the same value — the phi is redundant.
    merged[i] = Use(Phi(vals[i], vals[i]));
  }
  Return(EmitCall(Reg(0), kPhis, merged));
  EndBlock();

  Allocate();
}
256
257
TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
  // Regression test: a loop with one more live phi than there are registers.
  const size_t kNumRegs = 3;
  const size_t kParams = kNumRegs + 1;
  // Override number of registers.
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();
  auto constant = DefineConstant();
  VReg parameters[kParams];
  for (size_t i = 0; i < arraysize(parameters); ++i) {
    parameters[i] = DefineConstant();
  }
  EndBlock();

  PhiInstruction* phis[kParams];
  {
    StartLoop(2);

    // Loop header.
    StartBlock();

    for (size_t i = 0; i < arraysize(parameters); ++i) {
      // Input 1 (the back-edge value) is patched in below.
      phis[i] = Phi(parameters[i], 2);
    }

    // Perform some computations.
    // something like phi[i] += const
    for (size_t i = 0; i < arraysize(parameters); ++i) {
      auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
      SetInput(phis[i], 1, result);
    }

    EndBlock(Branch(Reg(DefineConstant()), 1, 2));

    // Jump back to loop header.
    StartBlock();
    EndBlock(Jump(-1));

    EndLoop();
  }

  StartBlock();
  Return(DefineConstant());
  EndBlock();

  Allocate();
}
305
306
TEST_F(RegisterAllocatorTest, SpillPhi) {
  // A phi of two values both fixed to register 0; the call before the use
  // forces the phi to be spilled and reloaded.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto left = Define(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  auto right = Define(Reg(0));
  EndBlock();

  StartBlock();
  auto phi = Phi(left, right);
  EmitCall(Slot(-1));
  Return(Reg(phi));
  EndBlock();

  Allocate();
}
327
328
TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
  // A call that needs every constant both in a specific register and in a
  // specific stack slot simultaneously, exercising gap-move generation.
  StartBlock();
  VReg constants[kDefaultNRegs];
  for (size_t i = 0; i < arraysize(constants); ++i) {
    constants[i] = DefineConstant();
  }
  TestOperand call_ops[kDefaultNRegs * 2];
  for (int i = 0; i < kDefaultNRegs; ++i) {
    call_ops[i] = Reg(constants[i], i);
  }
  for (int i = 0; i < kDefaultNRegs; ++i) {
    call_ops[i + kDefaultNRegs] = Slot(constants[i], i);
  }
  EmitCall(Slot(-1), arraysize(call_ops), call_ops);
  EndBlock(Last());

  Allocate();
}
347
348
TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
  const int kNumRegs = 6;
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();

  // Stack parameters/spilled values.
  auto p_0 = Define(Slot(-1));
  auto p_1 = Define(Slot(-2));

  // Fill registers.
  VReg values[kNumRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  // values[0] will be split in the second half of this instruction.
  // Models Intel mod instructions.
  EmitOI(Reg(0), Reg(p_0, 1), UniqueReg(p_1));
  EmitI(Reg(values[0], 0));
  EndBlock(Last());

  Allocate();
}
373
374
TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
  const int kNumRegs = 6;
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();

  // Stack parameters/spilled values.
  auto p_0 = Define(Slot(-1));
  auto p_1 = Define(Slot(-2));

  // Fill registers.
  VReg values[kNumRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  // values[0] and [1] will be split in the second half of this instruction.
  EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
  EmitI(Reg(values[0]), Reg(values[1]));
  EndBlock(Last());

  Allocate();
}
398
399
TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
  // Two inner diamonds nested inside an outer diamond; each merge is a phi,
  // and the outer merge joins the two inner phis. No fixed registers.
  // Outer diamond.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 5));

  // Diamond 1
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto ll = Define(Reg());
  EndBlock(Jump(2));

  StartBlock();
  auto lr = Define(Reg());
  EndBlock();

  StartBlock();
  auto l_phi = Phi(ll, lr);
  EndBlock(Jump(5));

  // Diamond 2
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto rl = Define(Reg());
  EndBlock(Jump(2));

  StartBlock();
  auto rr = Define(Reg());
  EndBlock();

  StartBlock();
  auto r_phi = Phi(rl, rr);
  EndBlock();

  // Outer diamond merge.
  StartBlock();
  auto phi = Phi(l_phi, r_phi);
  Return(Reg(phi));
  EndBlock();

  Allocate();
}
445
446
TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
  // Same shape as NestedDiamondPhiMerge, but every leaf definition is pinned
  // to a different register, so the phi merges require moves.
  // Outer diamond.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 5));

  // Diamond 1
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto ll = Define(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  auto lr = Define(Reg(1));
  EndBlock();

  StartBlock();
  auto l_phi = Phi(ll, lr);
  EndBlock(Jump(5));

  // Diamond 2
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto rl = Define(Reg(2));
  EndBlock(Jump(2));

  StartBlock();
  auto rr = Define(Reg(3));
  EndBlock();

  StartBlock();
  auto r_phi = Phi(rl, rr);
  EndBlock();

  // Outer diamond merge.
  StartBlock();
  auto phi = Phi(l_phi, r_phi);
  Return(Reg(phi));
  EndBlock();

  Allocate();
}
492
493
TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
  StartBlock();

  // Fill registers.
  VReg values[kDefaultNRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    if (i == 0 || i == 1) continue;  // Leave a hole for c_1 to take.
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  auto c_0 = DefineConstant();
  auto c_1 = DefineConstant();

  EmitOI(Reg(1), Reg(c_0, 0), UniqueReg(c_1));

  // Use previous values to force c_1 to split before the previous instruction.
  for (size_t i = 0; i < arraysize(values); ++i) {
    if (i == 0 || i == 1) continue;
    EmitI(Reg(values[i], static_cast<int>(i)));
  }

  EndBlock(Last());

  Allocate();
}
519
520
TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
  // Regression test: the same parameter is needed both as a unique operand
  // and fixed in register 1 by a single call.
  StartBlock();
  auto p_0 = Parameter(Reg(1));
  EmitCall(Slot(-2), Unique(p_0), Reg(p_0, 1));
  EndBlock(Last());

  Allocate();
}
529
530
TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
  StartBlock();
  // Fill registers.
  VReg values[kDefaultNRegs];
  for (size_t i = arraysize(values); i > 0; --i) {
    values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
  }
  auto c = DefineConstant();
  auto to_spill = Define(Reg());
  EndBlock(Jump(1));

  {
    StartLoop(1);

    StartBlock();
    // Create a use for c in second half of prev block's last gap
    Phi(c);
    for (size_t i = arraysize(values); i > 0; --i) {
      Phi(values[i - 1]);
    }
    EndBlock(Jump(1));

    EndLoop();
  }

  StartBlock();
  // Force c to split within to_spill's definition.
  EmitI(Reg(c));
  EmitI(Reg(to_spill));
  EndBlock(Last());

  Allocate();
}
564
565
TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
  // Diamond whose first (taken) arm contains a call; x (in reg 0) must
  // survive the call, and occupy reuses reg 0 afterwards.
  StartBlock();
  auto x = EmitOI(Reg(0));
  EndBlock(Branch(Reg(x), 1, 2));

  StartBlock();
  EmitCall(Slot(-1));
  auto occupy = EmitOI(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(FallThrough());

  StartBlock();
  Use(occupy);
  Return(Reg(x));
  EndBlock();
  Allocate();
}
585
586
TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
  // Same as DiamondWithCallFirstBlock, but the call sits in the second
  // (fall-through) arm of the diamond.
  StartBlock();
  auto x = EmitOI(Reg(0));
  EndBlock(Branch(Reg(x), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EmitCall(Slot(-1));
  auto occupy = EmitOI(Reg(0));
  EndBlock(FallThrough());

  StartBlock();
  Use(occupy);
  Return(Reg(x));
  EndBlock();
  Allocate();
}
606
607
TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
  // var is only spilled because of the call in the deferred block B2; with
  // range preprocessing enabled the spill move should sit at the call inside
  // the deferred block rather than eagerly at var's definition.
  StartBlock();  // B0
  auto var = EmitOI(Reg(0));
  EndBlock(Branch(Reg(var), 1, 2));

  StartBlock();  // B1
  EndBlock(Jump(2));

  StartBlock(true);  // B2 (deferred)
  EmitCall(Slot(-1), Slot(var));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var, 0));
  EndBlock();

  Allocate();

  const int var_def_index = 1;
  const int call_index = 3;
  // Which position gets the spill move depends on the preprocessing flag;
  // the other position must have no moves at all.
  int expect_no_moves =
      FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
  int expect_spill_move =
      FLAG_turbo_preprocess_ranges ? call_index : var_def_index;

  // We should have no parallel moves at the "expect_no_moves" position.
  EXPECT_EQ(
      0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));

  // The spill should be performed at the position expect_spill_move.
  EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
                                    sequence(), Reg(0), Slot(0)));
}
645
646
TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
  // Only meaningful when deferred-block spill placement is enabled.
  if (!FLAG_turbo_preprocess_ranges) return;

  StartBlock();  // B0
  auto var1 = EmitOI(Reg(0));
  auto var2 = EmitOI(Reg(1));
  auto var3 = EmitOI(Reg(2));
  EndBlock(Branch(Reg(var1, 0), 1, 2));

  StartBlock(true);  // B1 (deferred)
  EmitCall(Slot(-2), Slot(var1));
  EndBlock(Jump(2));

  StartBlock(true);  // B2 (deferred)
  EmitCall(Slot(-1), Slot(var2));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var3, 2));
  EndBlock();

  // Instruction indices of the interesting positions in the sequence built
  // above (parallel moves attach to these positions).
  const int def_of_v2 = 3;
  const int call_in_b1 = 4;
  const int call_in_b2 = 6;
  const int end_of_b1 = 5;
  const int end_of_b2 = 7;
  const int start_of_b3 = 8;

  Allocate();
  // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
  // so only var3 is spilled in deferred blocks.
  const int var3_reg = 2;
  const int var3_slot = 2;

  // var3's spill must NOT happen at its definition...
  EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
                                     Reg(var3_reg), Slot()));
  // ...but at each deferred call, with a reload at each deferred block end.
  EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
                                    Reg(var3_reg), Slot(var3_slot)));
  EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
                                    Slot(var3_slot), Reg()));

  EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
                                    Reg(var3_reg), Slot(var3_slot)));
  EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
                                    Slot(var3_slot), Reg()));

  // The non-deferred continuation must be free of moves.
  EXPECT_EQ(0,
            GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
}
701
702
703namespace {
704
// How the parameter under test is defined before the slot-constrained uses.
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };

const ParameterType kParameterTypes[] = {
    ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
    ParameterType::kFixedRegister};

// Parameterized fixture: the tuple carries (parameter definition kind,
// use-pattern variant index in [0, kMaxVariant)).
class SlotConstraintTest : public RegisterAllocatorTest,
                           public ::testing::WithParamInterface<
                               ::testing::tuple<ParameterType, int>> {
 public:
  static const int kMaxVariant = 5;

 protected:
  ParameterType parameter_type() const {
    return ::testing::get<0>(B::GetParam());
  }
  int variant() const { return ::testing::get<1>(B::GetParam()); }

 private:
  typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
};
726
727} // namespace
728
729
730#if GTEST_HAS_COMBINE
731
TEST_P(SlotConstraintTest, SlotConstraint) {
  StartBlock();
  VReg p_0;
  switch (parameter_type()) {
    case ParameterType::kFixedSlot:
      p_0 = Parameter(Slot(-1));
      break;
    case ParameterType::kSlot:
      // NOTE(review): identical to the kFixedSlot case above — presumably
      // this was meant to be an unconstrained Slot(); verify intent.
      p_0 = Parameter(Slot(-1));
      break;
    case ParameterType::kRegister:
      p_0 = Parameter(Reg());
      break;
    case ParameterType::kFixedRegister:
      p_0 = Parameter(Reg(1));
      break;
  }
  // Each variant exercises a different mix of slot and register uses of p_0.
  switch (variant()) {
    case 0:
      EmitI(Slot(p_0), Reg(p_0));
      break;
    case 1:
      EmitI(Slot(p_0));
      break;
    case 2:
      EmitI(Reg(p_0));
      EmitI(Slot(p_0));
      break;
    case 3:
      EmitI(Slot(p_0));
      EmitI(Reg(p_0));
      break;
    case 4:
      EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
      break;
    default:
      UNREACHABLE();
      break;
  }
  EndBlock(Last());

  Allocate();
}
775
776
// Instantiate SlotConstraintTest over every (parameter kind, variant) pair.
INSTANTIATE_TEST_CASE_P(
    RegisterAllocatorTest, SlotConstraintTest,
    ::testing::Combine(::testing::ValuesIn(kParameterTypes),
                       ::testing::Range(0, SlotConstraintTest::kMaxVariant)));
781
782#endif // GTEST_HAS_COMBINE
783
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400784} // namespace compiler
785} // namespace internal
786} // namespace v8