// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* TempRegister(Register reg) {
    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                           Register::ToAllocationIndex(reg));
  }

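  // A constant can be folded into an instruction as an immediate only if it
  // fits into a sign-extended 32-bit field: apart from movq, x64 instructions
  // take at most imm32 operands (e.g. |cmpq rax, imm32|).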
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      case IrOpcode::kInt64Constant: {
        const int64_t value = OpParameter<int64_t>(node);
        return value == static_cast<int64_t>(static_cast<int32_t>(value));
      }
      default:
        return false;
    }
  }

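  // Fills |inputs| for a memory operand of the form
  // [base + index * 2^scale_exponent + displacement] and returns the matching
  // addressing mode. In the mode names, M stands for a memory operand, R for
  // a base register, 1/2/4/8 for the index scale factor and I for an
  // immediate displacement; e.g. kMode_MR4I means [base + index*4 + disp].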
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             InstructionOperand* inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != NULL) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != NULL) {
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != NULL) {
          inputs[(*input_count)++] = UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == NULL) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK(index != NULL);
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      inputs[(*input_count)++] = UseRegister(index);
      if (displacement != NULL) {
        inputs[(*input_count)++] = UseImmediate(displacement);
        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                    kMode_M4I, kMode_M8I};
        mode = kMnI_modes[scale_exponent];
      } else {
        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller instruction encoding than [%r1*2+0].
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }

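  // Decomposes the address computation feeding a load or store into the
  // base + index * scale + displacement form above. If the displacement does
  // not fit into an immediate, falls back to a plain [register + register]
  // operand built from the node's two inputs.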
  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand* inputs[],
                                                  size_t* input_count) {
    BaseWithIndexAndDisplacement64Matcher m(operand, true);
    DCHECK(m.matches());
    if (m.displacement() == NULL || CanBeImmediate(m.displacement())) {
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
                                         m.displacement(), inputs, input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MR1;
    }
  }

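  // A node that is not live past the current instruction makes the better
  // left operand: binary operations are selected as DefineSameAsFirst, so
  // overwriting a dead node's register avoids a move to keep its value alive.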
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};


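// Selects the load instruction from the loaded representation and folds the
// address computation into the addressing mode; e.g. a kRepWord32 load from
// [base + index*4 + 8] becomes a single movl with mode kMode_MR4I.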
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  X64OperandGenerator g(this);

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }

  InstructionOperand* outputs[1];
  outputs[0] = g.DefineAsRegister(node);
  InstructionOperand* inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  Emit(code, 1, outputs, input_count, inputs);
}


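// Stores that need a full write barrier are emitted as kX64StoreWriteBarrier
// with base, index and value pinned to the fixed registers the RecordWrite
// code expects; all other stores reuse the same addressing modes as loads.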
void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs.
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kX64Movb;
      break;
    case kRepWord16:
      opcode = kX64Movw;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  InstructionOperand* value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  inputs[input_count++] = value_operand;
  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}


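// Checked loads bounds-check the offset against the buffer length before the
// access. When the offset is |x + K| with a constant K >= 0 and the length is
// a constant >= K, the constant part is passed as an immediate and only |x|
// needs a register.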
void InstructionSelector::VisitCheckedLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case kRepWord32:
      opcode = kCheckedLoadWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
      return;
    }
  }
  InstructionOperand* length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.TempImmediate(0), length_operand);
}


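// Checked stores mirror the checked-load pattern above, with the value to be
// stored appended as the final input.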
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  X64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode;
  switch (rep) {
    case kRepWord8:
      opcode = kCheckedStoreWord8;
      break;
    case kRepWord16:
      opcode = kCheckedStoreWord16;
      break;
    case kRepWord32:
      opcode = kCheckedStoreWord32;
      break;
    case kRepFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case kRepFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  InstructionOperand* value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
    Int32Matcher mlength(length);
    Int32BinopMatcher moffset(offset);
    if (mlength.HasValue() && moffset.right().HasValue() &&
        moffset.right().Value() >= 0 &&
        mlength.Value() >= moffset.right().Value()) {
      Emit(opcode, nullptr, g.UseRegister(buffer),
           g.UseRegister(moffset.left().node()),
           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
           value_operand);
      return;
    }
  }
  InstructionOperand* length_operand =
      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
  Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
       g.TempImmediate(0), length_operand, value_operand);
}


// Shared routine for multiple binary operations.
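// The FlagsContinuation describes how the condition flags set by the
// operation are consumed: either by a two-target branch (e.g. addl followed
// by jo for an overflow check) or by materializing the condition into a
// boolean register.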
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand* const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, static_cast<int>(input_count));
  DCHECK_NE(0, static_cast<int>(output_count));
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kX64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


namespace {

// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
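    // Hardware 64-bit shifts use only the low six bits of the count, so an
    // explicit |count & 0x3F| mask on the shift amount is redundant and can
    // be stripped.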
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement) {
  X64OperandGenerator g(selector);

  InstructionOperand* inputs[4];
  size_t input_count = 0;
  AddressingMode mode = g.GenerateMemoryOperandInputs(
      index, scale, base, displacement, inputs, &input_count);

  DCHECK_NE(0, static_cast<int>(input_count));
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand* outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}

}  // namespace


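// A shift by a small constant is strength-reduced to an leal with a scaled
// index, e.g. |x << 2| becomes |leal dst, [x*4]|. The same matcher drives
// VisitInt32Mul below, where multiplications by 3, 5 and 9 additionally use
// the base register (base + index*scale).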
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : NULL;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
    return;
  }
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kX64Shl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


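// The sign-extension idioms |(x << 16) >> 16| and |(x << 24) >> 24| are
// matched into a single movsxwl/movsxbl instead of a shift pair.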
void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    }
  }
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern.
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement());
    return;
  }

  // No leal pattern match, use addl.
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leal"
      // instructions by negating the value.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-m.right().Value()));
      return;
    }
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}


namespace {

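// With an immediate operand, imull has a non-destructive three-operand form
// (imull dst, src, imm), so the result can be defined in a fresh register.
// The register-register form is destructive, so there the output must alias
// the first input and the dead operand, if any, is preferred on the left.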
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


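// Multiply-high uses the one-operand mul/imul instruction, which reads the
// first factor from rax and leaves the high half of the product in rdx.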
void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
  // TODO(turbofan): We use UseUniqueRegister here to improve register
  // allocation.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right));
}


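// Division uses the one-operand idiv/div instruction, which takes the
// dividend in rdx:rax and produces the quotient in rax and the remainder in
// rdx; the register not holding the result is clobbered and reserved as a
// temp.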
void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
                 g.UseFixed(node->InputAt(0), rax),
                 g.UseUniqueRegister(node->InputAt(1)));
}

}  // namespace


void InstructionSelector::VisitInt32Mul(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : NULL;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
    return;
  }
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
      // the zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


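// Truncation to 32 bits is normally a plain movl, but when the input is a
// 64-bit right shift by 32 the shift itself already leaves the desired bits
// in the low half, so only a single shrq by 32 is emitted.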
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}


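// With AVX, the three-operand VEX encodings can write the result to any
// register, so the output is defined as a fresh register; the SSE encodings
// are destructive, so there the output must alias the first input.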
void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  if (IsSupported(AVX)) {
    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  } else {
    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
  }
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand* temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


namespace {

void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

}  // namespace


void InstructionSelector::VisitFloat64Floor(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Floor, node);
}


void InstructionSelector::VisitFloat64Ceil(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64Ceil, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  DCHECK(CpuFeatures::IsSupported(SSE4_1));
  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
}


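// SSE4.1 roundsd offers floor, ceil, truncate and ties-to-even, but not
// ties-away rounding, so this operator is never advertised in
// SupportedMachineOperatorFlags() below and must not reach the selector.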
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitCall(Node* node) {
  X64OperandGenerator g(this);
  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        node->InputAt(static_cast<int>(descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(node, &buffer, true, true);

  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    // TODO(titzer): handle pushing double parameters.
    Emit(kX64Push, NULL,
         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  InstructionOperand** first_output =
      buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL;
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), first_output,
           buffer.instruction_args.size(), &buffer.instruction_args.front());
  call_instr->MarkAsCall();
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         Node* left, Node* right, FlagsContinuation* cont,
                         bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, left, right, cont,
                 node->op()->HasProperty(Operator::kCommutative));
  }
}


// Shared routine for comparison with zero.
static void VisitCompareZero(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}


// Shared routine for multiple float64 compare operations.
static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                                FlagsContinuation* cont) {
  VisitCompare(selector, kSSEFloat64Cmp, node->InputAt(0), node->InputAt(1),
               cont, node->op()->HasProperty(Operator::kCommutative));
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  X64OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (OpParameter<size_t>(value) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* node = value->InputAt(0);
          Node* result = node->FindProjection(0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add32, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub32, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kX64Cmp32, &cont);
      case IrOpcode::kInt64Sub:
        return VisitWordCompare(this, value, kX64Cmp, &cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kX64Test32, &cont);
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kX64Test, &cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitCompareZero(this, value, kX64Cmp32, &cont);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by simply inverting the
    // branch.
    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the branch with a comparison.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kX64Cmp32, &cont);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kX64Test32, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp32, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* value = m.left().node();

    // Try to combine with comparisons against 0 by simply inverting the
    // branch.
    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    }

    // Try to combine the branch with a comparison.
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWordCompare(this, value, kX64Cmp, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
    return VisitCompareZero(this, value, kX64Cmp, &cont);
  }
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnorderedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    return MachineOperatorBuilder::kFloat64Floor |
           MachineOperatorBuilder::kFloat64Ceil |
           MachineOperatorBuilder::kFloat64RoundTruncate |
           MachineOperatorBuilder::kWord32ShiftIsSafe;
  }
  return MachineOperatorBuilder::kNoFlags;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8