blob: 1ec5ab4c41a59a08d680582b08e9d4db04dd0e7a [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/instruction-selector-impl.h"
6#include "src/compiler/node-matchers.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00007#include "src/compiler/node-properties.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +00008
9namespace v8 {
10namespace internal {
11namespace compiler {
12
// Immediate operand encodings understood by the ARM64 backend. Used by
// Arm64OperandGenerator::CanBeImmediate() to decide whether a constant node
// can be emitted directly as an instruction immediate.
enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,  // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};
25
26
27// Adds Arm64-specific methods for generating operands.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000028class Arm64OperandGenerator final : public OperandGenerator {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000029 public:
30 explicit Arm64OperandGenerator(InstructionSelector* selector)
31 : OperandGenerator(selector) {}
32
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000033 InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000034 if (CanBeImmediate(node, mode)) {
35 return UseImmediate(node);
36 }
37 return UseRegister(node);
38 }
39
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000040 // Use the zero register if the node has the immediate value zero, otherwise
41 // assign a register.
42 InstructionOperand UseRegisterOrImmediateZero(Node* node) {
43 if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
44 return UseImmediate(node);
45 }
46 return UseRegister(node);
47 }
48
49 // Use the provided node if it has the required value, or create a
50 // TempImmediate otherwise.
51 InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
52 if (GetIntegerConstantValue(node) == value) {
53 return UseImmediate(node);
54 }
55 return TempImmediate(value);
56 }
57
58 bool IsIntegerConstant(Node* node) {
59 return (node->opcode() == IrOpcode::kInt32Constant) ||
60 (node->opcode() == IrOpcode::kInt64Constant);
61 }
62
63 int64_t GetIntegerConstantValue(Node* node) {
64 if (node->opcode() == IrOpcode::kInt32Constant) {
65 return OpParameter<int32_t>(node);
66 }
67 DCHECK(node->opcode() == IrOpcode::kInt64Constant);
68 return OpParameter<int64_t>(node);
69 }
70
Ben Murdochb8a8cc12014-11-26 15:28:44 +000071 bool CanBeImmediate(Node* node, ImmediateMode mode) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000072 return IsIntegerConstant(node) &&
73 CanBeImmediate(GetIntegerConstantValue(node), mode);
Emily Bernierd0a1eb72015-03-24 16:35:39 -040074 }
75
76 bool CanBeImmediate(int64_t value, ImmediateMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000077 unsigned ignored;
78 switch (mode) {
79 case kLogical32Imm:
80 // TODO(dcarney): some unencodable values can be handled by
81 // switching instructions.
82 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
83 &ignored, &ignored, &ignored);
84 case kLogical64Imm:
85 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
86 &ignored, &ignored, &ignored);
87 case kArithmeticImm:
Ben Murdochb8a8cc12014-11-26 15:28:44 +000088 return Assembler::IsImmAddSub(value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +000089 case kLoadStoreImm8:
90 return IsLoadStoreImmediate(value, LSByte);
91 case kLoadStoreImm16:
92 return IsLoadStoreImmediate(value, LSHalfword);
93 case kLoadStoreImm32:
94 return IsLoadStoreImmediate(value, LSWord);
95 case kLoadStoreImm64:
96 return IsLoadStoreImmediate(value, LSDoubleWord);
97 case kNoImmediate:
98 return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000099 case kShift32Imm: // Fall through.
100 case kShift64Imm:
101 // Shift operations only observe the bottom 5 or 6 bits of the value.
102 // All possible shifts can be encoded by discarding bits which have no
103 // effect.
104 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000105 }
106 return false;
107 }
108
109 private:
110 bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
111 return Assembler::IsImmLSScaled(value, size) ||
112 Assembler::IsImmLSUnscaled(value);
113 }
114};
115
116
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000117namespace {
118
119void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400120 Arm64OperandGenerator g(selector);
121 selector->Emit(opcode, g.DefineAsRegister(node),
122 g.UseRegister(node->InputAt(0)));
123}
124
125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000126void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000127 Arm64OperandGenerator g(selector);
128 selector->Emit(opcode, g.DefineAsRegister(node),
129 g.UseRegister(node->InputAt(0)),
130 g.UseRegister(node->InputAt(1)));
131}
132
133
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000134void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
135 ImmediateMode operand_mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000136 Arm64OperandGenerator g(selector);
137 selector->Emit(opcode, g.DefineAsRegister(node),
138 g.UseRegister(node->InputAt(0)),
139 g.UseOperand(node->InputAt(1), operand_mode));
140}
141
142
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000143bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
144 Node* input_node, InstructionCode* opcode, bool try_ror) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400145 Arm64OperandGenerator g(selector);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000146
147 if (!selector->CanCover(node, input_node)) return false;
148 if (input_node->InputCount() != 2) return false;
149 if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
150
151 switch (input_node->opcode()) {
152 case IrOpcode::kWord32Shl:
153 case IrOpcode::kWord64Shl:
154 *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
155 return true;
156 case IrOpcode::kWord32Shr:
157 case IrOpcode::kWord64Shr:
158 *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
159 return true;
160 case IrOpcode::kWord32Sar:
161 case IrOpcode::kWord64Sar:
162 *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
163 return true;
164 case IrOpcode::kWord32Ror:
165 case IrOpcode::kWord64Ror:
166 if (try_ror) {
167 *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
168 return true;
169 }
170 return false;
171 default:
172 return false;
173 }
174}
175
176
177bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
178 Node* node, Node* left_node, Node* right_node,
179 InstructionOperand* left_op,
180 InstructionOperand* right_op, InstructionCode* opcode) {
181 if (!selector->CanCover(node, right_node)) return false;
182
183 NodeMatcher nm(right_node);
184
185 if (nm.IsWord32And()) {
186 Int32BinopMatcher mright(right_node);
187 if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
188 int32_t mask = mright.right().Value();
189 *left_op = g->UseRegister(left_node);
190 *right_op = g->UseRegister(mright.left().node());
191 *opcode |= AddressingModeField::encode(
192 (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
193 return true;
194 }
195 } else if (nm.IsWord32Sar()) {
196 Int32BinopMatcher mright(right_node);
197 if (selector->CanCover(mright.node(), mright.left().node()) &&
198 mright.left().IsWord32Shl()) {
199 Int32BinopMatcher mleft_of_right(mright.left().node());
200 if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
201 (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
202 int32_t shift = mright.right().Value();
203 *left_op = g->UseRegister(left_node);
204 *right_op = g->UseRegister(mleft_of_right.left().node());
205 *opcode |= AddressingModeField::encode(
206 (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
207 return true;
208 }
209 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400210 }
211 return false;
212}
213
214
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000215// Shared routine for multiple binary operations.
216template <typename Matcher>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000217void VisitBinop(InstructionSelector* selector, Node* node,
218 InstructionCode opcode, ImmediateMode operand_mode,
219 FlagsContinuation* cont) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000220 Arm64OperandGenerator g(selector);
221 Matcher m(node);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000222 InstructionOperand inputs[5];
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000223 size_t input_count = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000224 InstructionOperand outputs[2];
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000225 size_t output_count = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000226 bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000227
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000228 // We can commute cmp by switching the inputs and commuting the flags
229 // continuation.
230 bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400231
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000232 // The cmp and cmn instructions are encoded as sub or add with zero output
233 // register, and therefore support the same operand modes.
234 bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
235 m.IsInt64Sub() || is_cmp;
236
237 Node* left_node = m.left().node();
238 Node* right_node = m.right().node();
239
240 if (g.CanBeImmediate(right_node, operand_mode)) {
241 inputs[input_count++] = g.UseRegister(left_node);
242 inputs[input_count++] = g.UseImmediate(right_node);
243 } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
244 cont->Commute();
245 inputs[input_count++] = g.UseRegister(right_node);
246 inputs[input_count++] = g.UseImmediate(left_node);
247 } else if (is_add_sub &&
248 TryMatchAnyExtend(&g, selector, node, left_node, right_node,
249 &inputs[0], &inputs[1], &opcode)) {
250 input_count += 2;
251 } else if (is_add_sub && can_commute &&
252 TryMatchAnyExtend(&g, selector, node, right_node, left_node,
253 &inputs[0], &inputs[1], &opcode)) {
254 if (is_cmp) cont->Commute();
255 input_count += 2;
256 } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
257 !is_add_sub)) {
258 Matcher m_shift(right_node);
259 inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400260 inputs[input_count++] = g.UseRegister(m_shift.left().node());
261 inputs[input_count++] = g.UseImmediate(m_shift.right().node());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000262 } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
263 !is_add_sub)) {
264 if (is_cmp) cont->Commute();
265 Matcher m_shift(left_node);
266 inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400267 inputs[input_count++] = g.UseRegister(m_shift.left().node());
268 inputs[input_count++] = g.UseImmediate(m_shift.right().node());
269 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000270 inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
271 inputs[input_count++] = g.UseRegister(right_node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400272 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000273
274 if (cont->IsBranch()) {
275 inputs[input_count++] = g.Label(cont->true_block());
276 inputs[input_count++] = g.Label(cont->false_block());
277 }
278
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000279 if (!is_cmp) {
280 outputs[output_count++] = g.DefineAsRegister(node);
281 }
282
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000283 if (cont->IsSet()) {
284 outputs[output_count++] = g.DefineAsRegister(cont->result());
285 }
286
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000287 DCHECK_NE(0u, input_count);
288 DCHECK((output_count != 0) || is_cmp);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000289 DCHECK_GE(arraysize(inputs), input_count);
290 DCHECK_GE(arraysize(outputs), output_count);
291
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000292 selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
293 inputs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000294}
295
296
297// Shared routine for multiple binary operations.
298template <typename Matcher>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000299void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
300 ImmediateMode operand_mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000301 FlagsContinuation cont;
302 VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
303}
304
305
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400306template <typename Matcher>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000307void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
308 ArchOpcode negate_opcode) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400309 Arm64OperandGenerator g(selector);
310 Matcher m(node);
311 if (m.right().HasValue() && (m.right().Value() < 0) &&
312 g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
313 selector->Emit(negate_opcode, g.DefineAsRegister(node),
314 g.UseRegister(m.left().node()),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000315 g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400316 } else {
317 VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
318 }
319}
320
321
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000322// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
323// return the value of k, otherwise return zero. This is used to reduce the
324// multiplication to addition with left shift: x + (x << k).
325template <typename Matcher>
326int32_t LeftShiftForReducedMultiply(Matcher* m) {
327 DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
328 if (m->right().HasValue() && m->right().Value() >= 3) {
329 uint64_t value_minus_one = m->right().Value() - 1;
330 if (base::bits::IsPowerOfTwo64(value_minus_one)) {
331 return WhichPowerOf2_64(value_minus_one);
332 }
333 }
334 return 0;
335}
336
337} // namespace
338
339
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000340void InstructionSelector::VisitLoad(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000341 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000342 Arm64OperandGenerator g(this);
343 Node* base = node->InputAt(0);
344 Node* index = node->InputAt(1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000345 ArchOpcode opcode = kArchNop;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000346 ImmediateMode immediate_mode = kNoImmediate;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000347 switch (load_rep.representation()) {
348 case MachineRepresentation::kFloat32:
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000349 opcode = kArm64LdrS;
350 immediate_mode = kLoadStoreImm32;
351 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000352 case MachineRepresentation::kFloat64:
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000353 opcode = kArm64LdrD;
354 immediate_mode = kLoadStoreImm64;
355 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000356 case MachineRepresentation::kBit: // Fall through.
357 case MachineRepresentation::kWord8:
358 opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000359 immediate_mode = kLoadStoreImm8;
360 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000361 case MachineRepresentation::kWord16:
362 opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000363 immediate_mode = kLoadStoreImm16;
364 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000365 case MachineRepresentation::kWord32:
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000366 opcode = kArm64LdrW;
367 immediate_mode = kLoadStoreImm32;
368 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000369 case MachineRepresentation::kTagged: // Fall through.
370 case MachineRepresentation::kWord64:
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000371 opcode = kArm64Ldr;
372 immediate_mode = kLoadStoreImm64;
373 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000374 case MachineRepresentation::kNone:
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000375 UNREACHABLE();
376 return;
377 }
378 if (g.CanBeImmediate(index, immediate_mode)) {
379 Emit(opcode | AddressingModeField::encode(kMode_MRI),
380 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
381 } else {
382 Emit(opcode | AddressingModeField::encode(kMode_MRR),
383 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
384 }
385}
386
387
388void InstructionSelector::VisitStore(Node* node) {
389 Arm64OperandGenerator g(this);
390 Node* base = node->InputAt(0);
391 Node* index = node->InputAt(1);
392 Node* value = node->InputAt(2);
393
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000394 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
395 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
396 MachineRepresentation rep = store_rep.representation();
397
398 // TODO(arm64): I guess this could be done in a better way.
399 if (write_barrier_kind != kNoWriteBarrier) {
400 DCHECK_EQ(MachineRepresentation::kTagged, rep);
401 InstructionOperand inputs[3];
402 size_t input_count = 0;
403 inputs[input_count++] = g.UseUniqueRegister(base);
404 inputs[input_count++] = g.UseUniqueRegister(index);
405 inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
406 ? g.UseRegister(value)
407 : g.UseUniqueRegister(value);
408 RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
409 switch (write_barrier_kind) {
410 case kNoWriteBarrier:
411 UNREACHABLE();
412 break;
413 case kMapWriteBarrier:
414 record_write_mode = RecordWriteMode::kValueIsMap;
415 break;
416 case kPointerWriteBarrier:
417 record_write_mode = RecordWriteMode::kValueIsPointer;
418 break;
419 case kFullWriteBarrier:
420 record_write_mode = RecordWriteMode::kValueIsAny;
421 break;
422 }
423 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
424 size_t const temp_count = arraysize(temps);
425 InstructionCode code = kArchStoreWithWriteBarrier;
426 code |= MiscField::encode(static_cast<int>(record_write_mode));
427 Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000428 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000429 ArchOpcode opcode = kArchNop;
430 ImmediateMode immediate_mode = kNoImmediate;
431 switch (rep) {
432 case MachineRepresentation::kFloat32:
433 opcode = kArm64StrS;
434 immediate_mode = kLoadStoreImm32;
435 break;
436 case MachineRepresentation::kFloat64:
437 opcode = kArm64StrD;
438 immediate_mode = kLoadStoreImm64;
439 break;
440 case MachineRepresentation::kBit: // Fall through.
441 case MachineRepresentation::kWord8:
442 opcode = kArm64Strb;
443 immediate_mode = kLoadStoreImm8;
444 break;
445 case MachineRepresentation::kWord16:
446 opcode = kArm64Strh;
447 immediate_mode = kLoadStoreImm16;
448 break;
449 case MachineRepresentation::kWord32:
450 opcode = kArm64StrW;
451 immediate_mode = kLoadStoreImm32;
452 break;
453 case MachineRepresentation::kTagged: // Fall through.
454 case MachineRepresentation::kWord64:
455 opcode = kArm64Str;
456 immediate_mode = kLoadStoreImm64;
457 break;
458 case MachineRepresentation::kNone:
459 UNREACHABLE();
460 return;
461 }
462 if (g.CanBeImmediate(index, immediate_mode)) {
463 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
464 g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
465 } else {
466 Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
467 g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
468 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000469 }
470}
471
472
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400473void InstructionSelector::VisitCheckedLoad(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000474 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400475 Arm64OperandGenerator g(this);
476 Node* const buffer = node->InputAt(0);
477 Node* const offset = node->InputAt(1);
478 Node* const length = node->InputAt(2);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000479 ArchOpcode opcode = kArchNop;
480 switch (load_rep.representation()) {
481 case MachineRepresentation::kWord8:
482 opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400483 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000484 case MachineRepresentation::kWord16:
485 opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400486 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000487 case MachineRepresentation::kWord32:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400488 opcode = kCheckedLoadWord32;
489 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000490 case MachineRepresentation::kWord64:
491 opcode = kCheckedLoadWord64;
492 break;
493 case MachineRepresentation::kFloat32:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400494 opcode = kCheckedLoadFloat32;
495 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000496 case MachineRepresentation::kFloat64:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400497 opcode = kCheckedLoadFloat64;
498 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000499 case MachineRepresentation::kBit: // Fall through.
500 case MachineRepresentation::kTagged: // Fall through.
501 case MachineRepresentation::kNone:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400502 UNREACHABLE();
503 return;
504 }
505 Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
506 g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
507}
508
509
510void InstructionSelector::VisitCheckedStore(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000511 MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400512 Arm64OperandGenerator g(this);
513 Node* const buffer = node->InputAt(0);
514 Node* const offset = node->InputAt(1);
515 Node* const length = node->InputAt(2);
516 Node* const value = node->InputAt(3);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000517 ArchOpcode opcode = kArchNop;
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400518 switch (rep) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000519 case MachineRepresentation::kWord8:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400520 opcode = kCheckedStoreWord8;
521 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000522 case MachineRepresentation::kWord16:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400523 opcode = kCheckedStoreWord16;
524 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000525 case MachineRepresentation::kWord32:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400526 opcode = kCheckedStoreWord32;
527 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000528 case MachineRepresentation::kWord64:
529 opcode = kCheckedStoreWord64;
530 break;
531 case MachineRepresentation::kFloat32:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400532 opcode = kCheckedStoreFloat32;
533 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000534 case MachineRepresentation::kFloat64:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400535 opcode = kCheckedStoreFloat64;
536 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000537 case MachineRepresentation::kBit: // Fall through.
538 case MachineRepresentation::kTagged: // Fall through.
539 case MachineRepresentation::kNone:
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400540 UNREACHABLE();
541 return;
542 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000543 Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400544 g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
545}
546
547
548template <typename Matcher>
549static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
550 ArchOpcode opcode, bool left_can_cover,
551 bool right_can_cover, ImmediateMode imm_mode) {
552 Arm64OperandGenerator g(selector);
553
554 // Map instruction to equivalent operation with inverted right input.
555 ArchOpcode inv_opcode = opcode;
556 switch (opcode) {
557 case kArm64And32:
558 inv_opcode = kArm64Bic32;
559 break;
560 case kArm64And:
561 inv_opcode = kArm64Bic;
562 break;
563 case kArm64Or32:
564 inv_opcode = kArm64Orn32;
565 break;
566 case kArm64Or:
567 inv_opcode = kArm64Orn;
568 break;
569 case kArm64Eor32:
570 inv_opcode = kArm64Eon32;
571 break;
572 case kArm64Eor:
573 inv_opcode = kArm64Eon;
574 break;
575 default:
576 UNREACHABLE();
577 }
578
579 // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
580 if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
581 Matcher mleft(m->left().node());
582 if (mleft.right().Is(-1)) {
583 // TODO(all): support shifted operand on right.
584 selector->Emit(inv_opcode, g.DefineAsRegister(node),
585 g.UseRegister(m->right().node()),
586 g.UseRegister(mleft.left().node()));
587 return;
588 }
589 }
590
591 // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
592 if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
593 right_can_cover) {
594 Matcher mright(m->right().node());
595 if (mright.right().Is(-1)) {
596 // TODO(all): support shifted operand on right.
597 selector->Emit(inv_opcode, g.DefineAsRegister(node),
598 g.UseRegister(m->left().node()),
599 g.UseRegister(mright.left().node()));
600 return;
601 }
602 }
603
604 if (m->IsWord32Xor() && m->right().Is(-1)) {
605 selector->Emit(kArm64Not32, g.DefineAsRegister(node),
606 g.UseRegister(m->left().node()));
607 } else if (m->IsWord64Xor() && m->right().Is(-1)) {
608 selector->Emit(kArm64Not, g.DefineAsRegister(node),
609 g.UseRegister(m->left().node()));
610 } else {
611 VisitBinop<Matcher>(selector, node, opcode, imm_mode);
612 }
613}
614
615
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000616void InstructionSelector::VisitWord32And(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400617 Arm64OperandGenerator g(this);
618 Int32BinopMatcher m(node);
619 if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
620 m.right().HasValue()) {
621 uint32_t mask = m.right().Value();
622 uint32_t mask_width = base::bits::CountPopulation32(mask);
623 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
624 if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
625 // The mask must be contiguous, and occupy the least-significant bits.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000626 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400627
628 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
629 // significant bits.
630 Int32BinopMatcher mleft(m.left().node());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000631 if (mleft.right().HasValue()) {
632 // Any shift value can match; int32 shifts use `value % 32`.
633 uint32_t lsb = mleft.right().Value() & 0x1f;
634
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400635 // Ubfx cannot extract bits past the register size, however since
636 // shifting the original value would have introduced some zeros we can
637 // still use ubfx with a smaller mask and the remaining bits will be
638 // zeros.
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400639 if (lsb + mask_width > 32) mask_width = 32 - lsb;
640
641 Emit(kArm64Ubfx32, g.DefineAsRegister(node),
642 g.UseRegister(mleft.left().node()),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000643 g.UseImmediateOrTemp(mleft.right().node(), lsb),
644 g.TempImmediate(mask_width));
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400645 return;
646 }
647 // Other cases fall through to the normal And operation.
648 }
649 }
650 VisitLogical<Int32BinopMatcher>(
651 this, node, &m, kArm64And32, CanCover(node, m.left().node()),
652 CanCover(node, m.right().node()), kLogical32Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000653}
654
655
656void InstructionSelector::VisitWord64And(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400657 Arm64OperandGenerator g(this);
658 Int64BinopMatcher m(node);
659 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
660 m.right().HasValue()) {
661 uint64_t mask = m.right().Value();
662 uint64_t mask_width = base::bits::CountPopulation64(mask);
663 uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
664 if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
665 // The mask must be contiguous, and occupy the least-significant bits.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000666 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400667
668 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
669 // significant bits.
670 Int64BinopMatcher mleft(m.left().node());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000671 if (mleft.right().HasValue()) {
672 // Any shift value can match; int64 shifts use `value % 64`.
673 uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
674
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400675 // Ubfx cannot extract bits past the register size, however since
676 // shifting the original value would have introduced some zeros we can
677 // still use ubfx with a smaller mask and the remaining bits will be
678 // zeros.
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400679 if (lsb + mask_width > 64) mask_width = 64 - lsb;
680
681 Emit(kArm64Ubfx, g.DefineAsRegister(node),
682 g.UseRegister(mleft.left().node()),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000683 g.UseImmediateOrTemp(mleft.right().node(), lsb),
684 g.TempImmediate(static_cast<int32_t>(mask_width)));
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400685 return;
686 }
687 // Other cases fall through to the normal And operation.
688 }
689 }
690 VisitLogical<Int64BinopMatcher>(
691 this, node, &m, kArm64And, CanCover(node, m.left().node()),
692 CanCover(node, m.right().node()), kLogical64Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000693}
694
695
696void InstructionSelector::VisitWord32Or(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400697 Int32BinopMatcher m(node);
698 VisitLogical<Int32BinopMatcher>(
699 this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
700 CanCover(node, m.right().node()), kLogical32Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000701}
702
703
704void InstructionSelector::VisitWord64Or(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400705 Int64BinopMatcher m(node);
706 VisitLogical<Int64BinopMatcher>(
707 this, node, &m, kArm64Or, CanCover(node, m.left().node()),
708 CanCover(node, m.right().node()), kLogical64Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000709}
710
711
712void InstructionSelector::VisitWord32Xor(Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000713 Int32BinopMatcher m(node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400714 VisitLogical<Int32BinopMatcher>(
715 this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
716 CanCover(node, m.right().node()), kLogical32Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000717}
718
719
720void InstructionSelector::VisitWord64Xor(Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000721 Int64BinopMatcher m(node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400722 VisitLogical<Int64BinopMatcher>(
723 this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
724 CanCover(node, m.right().node()), kLogical64Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000725}
726
727
// Lowers a 32-bit left shift. Recognizes Shl(And(x, mask), imm) where the
// mask is contiguous and starts at bit 0 (CountTrailingZeros == 0, checked by
// the DCHECK): if the shift moves the whole mask to or past the top bit the
// AND is redundant and a plain LSL suffices; otherwise the AND+SHL pair
// becomes a single UBFIZ (zero-extend bitfield and insert at |imm|).
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Arm64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      // mask_width + mask_msb == 32 iff the set bits are contiguous and
      // reach down to bit 0.
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kArm64Lsl32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        } else {
          // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
          // contiguous, and the shift immediate non-zero.
          Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
          return;
        }
      }
    }
  }
  // Fallback: plain 32-bit LSL with a register or shift-immediate operand.
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
763
764
// Lowers a 64-bit left shift. When the shifted value is a 32->64-bit
// sign/zero extension and the shift amount is 32..63, the extension is
// skipped entirely: the bits it would have produced are all shifted out.
void InstructionSelector::VisitWord64Shl(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63)) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kArm64Lsl, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
779
780
namespace {

// Tries to lower Shr/Sar(Shl(x, K), K) — both shift amounts equal modulo 32
// and non-zero — to a single unsigned (UBFX) or signed (SBFX) bitfield
// extract of the low (32 - K) bits of x. Returns true iff an instruction
// was emitted; callers fall through to their generic lowering on false.
bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
  Arm64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
    // OP is >>> or >> and (K & 0x1f) != 0.
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && m.right().HasValue() &&
        (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
      DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
      // Sar keeps the sign, so it maps to the signed extract.
      ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;

      int right_val = m.right().Value() & 0x1f;
      DCHECK_NE(right_val, 0);

      // Extract (32 - K) bits starting at bit 0 of the original value.
      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(mleft.left().node()), g.TempImmediate(0),
                     g.TempImmediate(32 - right_val));
      return true;
    }
  }
  return false;
}

}  // namespace
808
809
// Lowers a 32-bit logical right shift. Patterns tried, in order:
// 1. Shr(And(x, mask), imm) where the mask's surviving bits end up in the
//    least-significant positions -> single UBFX.
// 2. Shr(Shl(x, K), K) -> bitfield extract (TryEmitBitfieldExtract32).
// 3. Shr(Uint32MulHigh(a, b), imm) -> UMULL plus one combined 64-bit LSR of
//    (32 + imm), folding MulHigh's own >>32 into this shift.
// Otherwise a plain 32-bit LSR is selected.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // (>> lsb) << lsb clears the low bits the shift discards anyway.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  } else if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Uint32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1f;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
852
853
// Lowers a 64-bit logical right shift. Shr(And(x, mask), imm), where the
// surviving mask bits land in the least-significant positions, becomes a
// single 64-bit UBFX; otherwise a plain LSR is selected.
void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3f;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // (>> lsb) << lsb clears the low bits the shift discards anyway.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation64(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}
878
879
// Lowers a 32-bit arithmetic right shift. Patterns tried, in order:
// 1. Sar(Shl(x, K), K) -> signed bitfield extract (TryEmitBitfieldExtract32).
// 2. Sar(Int32MulHigh(a, b), imm) -> SMULL plus one combined 64-bit ASR of
//    (32 + imm), folding MulHigh's own >>32 into this shift.
// 3. Sar(Add(a, Int32MulHigh(x, y)), imm) -> SMULL, then a 64-bit ADD with
//    the ASR-#32 operand shift, then a 32-bit ASR.
// Otherwise a plain 32-bit ASR is selected.
void InstructionSelector::VisitWord32Sar(Node* node) {
  if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  Int32BinopMatcher m(node);
  if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Int32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1f;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  if (m.left().IsInt32Add() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    Node* add_node = m.left().node();
    Int32BinopMatcher madd_node(add_node);
    if (madd_node.left().IsInt32MulHigh() &&
        CanCover(add_node, madd_node.left().node())) {
      // Combine the shift that would be generated by Int32MulHigh with the add
      // on the left of this Sar operation. We do it here, as the result of the
      // add potentially has 33 bits, so we have to ensure the result is
      // truncated by being the input to this 32-bit Sar operation.
      Arm64OperandGenerator g(this);
      Node* mul_node = madd_node.left().node();

      InstructionOperand const smull_operand = g.TempRegister();
      Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
           g.UseRegister(mul_node->InputAt(1)));

      // 64-bit add with the multiply result pre-shifted right by 32.
      InstructionOperand const add_operand = g.TempRegister();
      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
           add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
           g.TempImmediate(32));

      Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
           g.UseImmediate(node->InputAt(1)));
      return;
    }
  }

  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
931
932
// Lowers a 64-bit arithmetic right shift to ASR.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Asr, node, kShift64Imm);
}


// Lowers a 32-bit rotate-right to ROR (W form).
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


// Lowers a 64-bit rotate-right to ROR (X form).
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
946
947
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000948void InstructionSelector::VisitWord64Clz(Node* node) {
949 Arm64OperandGenerator g(this);
950 Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
951}
952
953
954void InstructionSelector::VisitWord32Clz(Node* node) {
955 Arm64OperandGenerator g(this);
956 Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
957}
958
959
960void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
961
962
963void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
964
965
966void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
967
968
969void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
970
971
// Lowers 32-bit addition. Add(Mul(x, y), z) — with the multiply on either
// side — becomes a single MADD, but only when LeftShiftForReducedMultiply
// (defined earlier in this file) returns 0, i.e. the multiply would not
// later be strength-reduced to an add-with-shift instead. All other cases
// go through the shared VisitAddSub helper.
void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}
1001
1002
// Lowers 64-bit addition; identical structure to VisitInt32Add but with the
// 64-bit matcher and opcodes (MADD fusion guarded by the same
// strength-reduction check).
void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}
1032
1033
// Lowers 32-bit subtraction. Sub(a, Mul(x, y)) becomes a single MSUB under
// the same strength-reduction guard used for MADD in VisitInt32Add.
// Otherwise the shared VisitAddSub helper handles it; note it receives the
// opcode pair in (Sub32, Add32) order — presumably so it can flip a
// subtract-of-constant into an add (confirm against its definition earlier
// in this file).
void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
1053
1054
// Lowers 64-bit subtraction; identical structure to VisitInt32Sub with the
// 64-bit matcher and opcodes (MSUB fusion plus the shared VisitAddSub
// fallback).
void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
1074
1075
// Lowers 32-bit multiplication. In order:
// 1. x * (2^k + 1) -> ADD with an LSL-#k shifted second operand
//    (LeftShiftForReducedMultiply reports k > 0 for such constants).
// 2. Mul(Sub(0, x), y) or Mul(x, Sub(0, y)) -> single MNEG.
// 3. Otherwise a plain MUL.
void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}
1116
1117
// Lowers 64-bit multiplication; identical structure to VisitInt32Mul with
// the 64-bit matcher and opcodes (add-with-shift reduction, MNEG fusion,
// plain MUL fallback).
void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}
1157
1158
// Lowers Int32MulHigh: SMULL produces the full signed 64-bit product in a
// temporary, then an ASR by 32 moves the high word into the result register.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}


// Lowers Uint32MulHigh: UMULL (unsigned full product) followed by LSR #32.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}
1175
1176
// Integer division and modulus visitors: each lowers to a single
// three-register pseudo-instruction via the shared VisitRRR helper.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


// Note: arm64 has no hardware remainder instruction, so the kArm64Imod*/
// kArm64Umod* opcodes are presumably expanded later by the code generator —
// confirm in the arm64 code-generator file.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}
1215
1216
// Scalar numeric conversions: each is a one-to-one lowering of a single-input
// operator to one machine opcode via the shared unary VisitRR helper.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kArm64Float32ToFloat64, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kArm64Int32ToFloat64, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint32ToFloat64, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ToInt32, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kArm64Float64ToUint32, node);
}
1240
1241
1242void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001243 Arm64OperandGenerator g(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001244
1245 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1246 InstructionOperand outputs[2];
1247 size_t output_count = 0;
1248 outputs[output_count++] = g.DefineAsRegister(node);
1249
1250 Node* success_output = NodeProperties::FindProjection(node, 1);
1251 if (success_output) {
1252 outputs[output_count++] = g.DefineAsRegister(success_output);
1253 }
1254
1255 Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
1256}
1257
1258
1259void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1260 Arm64OperandGenerator g(this);
1261
1262 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1263 InstructionOperand outputs[2];
1264 size_t output_count = 0;
1265 outputs[output_count++] = g.DefineAsRegister(node);
1266
1267 Node* success_output = NodeProperties::FindProjection(node, 1);
1268 if (success_output) {
1269 outputs[output_count++] = g.DefineAsRegister(success_output);
1270 }
1271
1272 Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
1273}
1274
1275
1276void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1277 Arm64OperandGenerator g(this);
1278
1279 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1280 InstructionOperand outputs[2];
1281 size_t output_count = 0;
1282 outputs[output_count++] = g.DefineAsRegister(node);
1283
1284 Node* success_output = NodeProperties::FindProjection(node, 1);
1285 if (success_output) {
1286 outputs[output_count++] = g.DefineAsRegister(success_output);
1287 }
1288
1289 Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
1290}
1291
1292
1293void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1294 Arm64OperandGenerator g(this);
1295
1296 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1297 InstructionOperand outputs[2];
1298 size_t output_count = 0;
1299 outputs[output_count++] = g.DefineAsRegister(node);
1300
1301 Node* success_output = NodeProperties::FindProjection(node, 1);
1302 if (success_output) {
1303 outputs[output_count++] = g.DefineAsRegister(success_output);
1304 }
1305
1306 Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001307}
1308
1309
// Lowers sign-extension of a 32-bit value to 64 bits: a single SXTW.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  VisitRR(this, kArm64Sxtw, node);
}


// Lowers zero-extension of a 32-bit value to 64 bits. If the input was
// produced by one of the 32-bit operations listed in the switch, its result
// was written to a W register — which already zeroed the upper 32 bits of
// the X register — so only a nop is emitted. Otherwise an explicit 32-bit
// register move performs the zero-extension.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32-bit of the corresponding X register) so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
1353
1354
// Lowers double -> single rounding to one conversion instruction.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64ToFloat32, node);
}


// Lowers double -> int32 truncation. JavaScript semantics (modular ToInt32)
// use the generic arch pseudo-op; round-to-zero uses the plain conversion.
void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  switch (TruncationModeOf(node->op())) {
    case TruncationMode::kJavaScript:
      return VisitRR(this, kArchTruncateDoubleToI, node);
    case TruncationMode::kRoundToZero:
      return VisitRR(this, kArm64Float64ToInt32, node);
  }
  UNREACHABLE();
}
1369
1370
// Lowers 64->32-bit truncation. A covered Sar(x, 32) or Shr(x, 32..63)
// feeding the truncation is folded into a single 64-bit LSR — valid because
// only the low 32 bits of this node's result are observed, where arithmetic
// and logical shifts agree. Otherwise a 32-bit register move truncates.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value) && value->InputCount() >= 2) {
    Int64BinopMatcher m(value);
    if ((m.IsWord64Sar() && m.right().HasValue() &&
         (m.right().Value() == 32)) ||
        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseImmediate(m.right().node()));
      return;
    }
  }

  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
1387
1388
// 64-bit integer -> float conversions: one-to-one unary lowerings.
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kArm64Int64ToFloat32, node);
}


void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Int64ToFloat64, node);
}


void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat32, node);
}


void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat64, node);
}


// Bitcasts between integer and FP registers; the 32-bit variants reuse the
// 64-bit word-move/extract opcodes — presumably they handle the narrow case
// too (confirm in the code generator).
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ExtractLowWord32, node);
}


void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kArm64U64MoveFloat64, node);
}


void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


// Simple binary FP arithmetic: three-register lowerings.
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kArm64Float32Add, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kArm64Float32Sub, node);
}
1442
1443
// Lowers 64-bit FP subtraction. Sub(-0.0, x) is negation and becomes FNEG
// (this is exact even for zeros/NaN, unlike 0.0 - x). The nested pattern
// Sub(-0.0, RoundDown(Sub(-0.0, x))) — i.e. -floor(-x) — is recognized as
// round-up (ceil). Otherwise a plain FSUB.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          // ceil(x) == -floor(-x): emit a single round-up.
          Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
               g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    Emit(kArm64Float64Neg, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitRRR(this, kArm64Float64Sub, node);
}
1466
1467
// FP multiply and divide: straightforward three-register lowerings.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kArm64Float32Mul, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArm64Float32Div, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArm64Float64Div, node);
}
1486
1487
// Lowers 64-bit FP modulus. There is no arm64 instruction for fmod, so this
// is emitted as a call-like instruction with its operands pinned to the
// d0/d1 argument registers and marked as a call for the register allocator.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
1494
1495
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001496void InstructionSelector::VisitFloat32Max(Node* node) {
1497 VisitRRR(this, kArm64Float32Max, node);
1498}
1499
1500
1501void InstructionSelector::VisitFloat64Max(Node* node) {
1502 VisitRRR(this, kArm64Float64Max, node);
1503}
1504
1505
1506void InstructionSelector::VisitFloat32Min(Node* node) {
1507 VisitRRR(this, kArm64Float32Min, node);
1508}
1509
1510
1511void InstructionSelector::VisitFloat64Min(Node* node) {
1512 VisitRRR(this, kArm64Float64Min, node);
1513}
1514
1515
1516void InstructionSelector::VisitFloat32Abs(Node* node) {
1517 VisitRR(this, kArm64Float32Abs, node);
1518}
1519
1520
1521void InstructionSelector::VisitFloat64Abs(Node* node) {
1522 VisitRR(this, kArm64Float64Abs, node);
1523}
1524
1525
1526void InstructionSelector::VisitFloat32Sqrt(Node* node) {
1527 VisitRR(this, kArm64Float32Sqrt, node);
1528}
1529
1530
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001531void InstructionSelector::VisitFloat64Sqrt(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001532 VisitRR(this, kArm64Float64Sqrt, node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001533}
1534
1535
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001536void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1537 VisitRR(this, kArm64Float32RoundDown, node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001538}
1539
1540
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001541void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1542 VisitRR(this, kArm64Float64RoundDown, node);
1543}
1544
1545
// Float32RoundUp (round toward +inf): single RR instruction.
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kArm64Float32RoundUp, node);
}
1549
1550
// Float64RoundUp (round toward +inf): single RR instruction.
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kArm64Float64RoundUp, node);
}
1554
1555
// Float32RoundTruncate (round toward zero): single RR instruction.
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float32RoundTruncate, node);
}
1559
1560
// Float64RoundTruncate (round toward zero): single RR instruction.
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float64RoundTruncate, node);
}
1564
1565
// Float64RoundTiesAway (ties round away from zero): single RR instruction.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesAway, node);
}
1569
1570
// Float32RoundTiesEven (ties round to even): single RR instruction.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float32RoundTiesEven, node);
}
1574
1575
// Float64RoundTiesEven (ties round to even): single RR instruction.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesEven, node);
}
1579
1580
// Materializes the outgoing call arguments: first claims (reserves) stack
// space for all of them in one instruction, then pokes each argument into
// its slot, from the highest slot index down to 0.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  Arm64OperandGenerator g(this);

  // Push the arguments to the stack.
  int aligned_push_count = static_cast<int>(arguments->size());

  bool pushed_count_uneven = aligned_push_count & 1;
  int claim_count = aligned_push_count;
  if (pushed_count_uneven && descriptor->UseNativeStack()) {
    // We can only claim for an even number of call arguments when we use the
    // native stack. (Presumably because csp must stay 16-byte aligned per
    // AAPCS64 — TODO confirm.)
    claim_count++;
  }
  // TODO(dcarney): claim and poke probably take small immediates,
  // loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    // and emit paired stores with increment for non c frames.
    Emit(kArm64ClaimForCallArguments, g.NoOutput(),
         g.TempImmediate(claim_count));
  }

  // Move arguments to the stack.
  int slot = aligned_push_count - 1;
  while (slot >= 0) {
    Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
         g.TempImmediate(slot));
    slot--;
    // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
    // same type.
    // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
    // g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
    // slot -= 2;
  }
}
1619
1620
// Tail-call targets are never encoded as immediates on arm64.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1622
1623
namespace {

// Shared routine for multiple compare operations. Emits the (continuation-
// encoded) compare either as a two-input branch with block labels, or as a
// flag-materializing instruction defining the continuation's result.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations. Folds an immediate
// operand into the compare when the encoding allows it, commuting the
// continuation's condition if the immediate is on the left of a
// non-commutative compare.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


// 32-bit word compare. Rewrites `x cmp (0 - y)` into `x cmn y`, mutating the
// compare node (and possibly the subtraction) in place before emitting.
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  Int32BinopMatcher m(node);
  ArchOpcode opcode = kArm64Cmp32;

  // Select negated compare for comparisons with negated right input.
  if (m.right().IsInt32Sub()) {
    Node* sub = m.right().node();
    Int32BinopMatcher msub(sub);
    if (msub.left().Is(0)) {
      bool can_cover = selector->CanCover(node, sub);
      node->ReplaceInput(1, msub.right().node());
      // Even if the comparison node covers the subtraction, after the input
      // replacement above, the node still won't cover the input to the
      // subtraction; the subtraction still uses it.
      // In order to get shifted operations to work, we must remove the rhs
      // input to the subtraction, as TryMatchAnyShift requires this node to
      // cover the input shift. We do this by setting it to the lhs input,
      // as we know it's zero, and the result of the subtraction isn't used by
      // any other node.
      if (can_cover) sub->ReplaceInput(1, msub.left().node());
      opcode = kArm64Cmn32;
    }
  }
  VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}


// Tests a value against itself (tst reg, reg) to set flags for the
// continuation.
void VisitWordTest(InstructionSelector* selector, Node* node,
                   InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}


// 32-bit self-test (kArm64Tst32).
void VisitWord32Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}


// 64-bit self-test (kArm64Tst).
void VisitWord64Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}


// Shared routine for multiple float32 compare operations. A literal 0.0f on
// either side is encoded as an immediate (commuting the condition when it is
// on the left).
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().Is(0.0f)) {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0f)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


// Shared routine for multiple float64 compare operations. Mirrors
// VisitFloat32Compare for the double-precision compare.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}

}  // namespace
1751
1752
// Selects instructions for a conditional branch, trying hard to fuse the
// branch with the flag-setting instruction that produces its condition.
// The continuation starts as "branch to tbranch if value != 0".
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // Try to combine with comparisons against 0 by simply inverting the branch.
  // Each stripped Word32Equal(x, 0) negates the condition once.
  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
    Int32BinopMatcher m(value);
    if (m.right().Is(0)) {
      user = value;
      value = m.left().node();
      cont.Negate();
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat32LessThan:
        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(this, value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(this, value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
                                                     kArithmeticImm, &cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
                                                     kArithmeticImm, &cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(this, value, &cont);
      case IrOpcode::kWord32And: {
        Int32BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros32(m.right().Value())),
               g.Label(cont.true_block()), g.Label(cont.false_block()));
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                kLogical32Imm);
      }
      case IrOpcode::kWord64And: {
        Int64BinopMatcher m(value);
        if (m.right().HasValue() &&
            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
          // If the mask has only one bit set, we can use tbz/tbnz.
          DCHECK((cont.condition() == kEqual) ||
                 (cont.condition() == kNotEqual));
          Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
               g.UseRegister(m.left().node()),
               g.TempImmediate(
                   base::bits::CountTrailingZeros64(m.right().Value())),
               g.Label(cont.true_block()), g.Label(cont.false_block()));
          return;
        }
        return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                kLogical64Imm);
      }
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, compare against 0 and branch.
  Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
       g.UseRegister(value), g.Label(cont.true_block()),
       g.Label(cont.false_block()));
}
1913
1914
1915void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1916 Arm64OperandGenerator g(this);
1917 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1918
1919 // Emit either ArchTableSwitch or ArchLookupSwitch.
1920 size_t table_space_cost = 4 + sw.value_range;
1921 size_t table_time_cost = 3;
1922 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1923 size_t lookup_time_cost = sw.case_count;
1924 if (sw.case_count > 0 &&
1925 table_space_cost + 3 * table_time_cost <=
1926 lookup_space_cost + 3 * lookup_time_cost &&
1927 sw.min_value > std::numeric_limits<int32_t>::min()) {
1928 InstructionOperand index_operand = value_operand;
1929 if (sw.min_value) {
1930 index_operand = g.TempRegister();
1931 Emit(kArm64Sub32, index_operand, value_operand,
1932 g.TempImmediate(sw.min_value));
1933 }
1934 // Generate a table lookup.
1935 return EmitTableSwitch(sw, index_operand);
1936 }
1937
1938 // Generate a sequence of conditional jumps.
1939 return EmitLookupSwitch(sw, value_operand);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001940}
1941
1942
// Word32Equal: when comparing against zero, try to reuse the flags set by
// the input operation (cmn/cmp/tst), and collapse a doubled
// Word32Equal(Word32Equal(x, y), 0) into a single negated compare by
// rewriting this node's inputs in place.
void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                  kArithmeticImm);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                  kLogical32Imm);
        case IrOpcode::kWord32Equal: {
          // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
          Int32BinopMatcher mequal(value);
          node->ReplaceInput(0, mequal.left().node());
          node->ReplaceInput(1, mequal.right().node());
          cont.Negate();
          return VisitWord32Compare(this, node, &cont);
        }
        default:
          break;
      }
      // No fusable input: test the value against itself.
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}
1976
1977
// Int32LessThan: signed 32-bit compare, condition kSignedLessThan.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
1982
1983
// Int32LessThanOrEqual: signed 32-bit compare, condition
// kSignedLessThanOrEqual.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
1988
1989
// Uint32LessThan: unsigned 32-bit compare, condition kUnsignedLessThan.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
1994
1995
// Uint32LessThanOrEqual: unsigned 32-bit compare, condition
// kUnsignedLessThanOrEqual.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2000
2001
// Word64Equal: when comparing against zero, reuse the flags of a covered
// Word64And input (tst); otherwise emit a 64-bit cmp.
void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      // No fusable input: test the value against itself.
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2021
2022
2023void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002024 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002025 FlagsContinuation cont(kOverflow, ovf);
2026 return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
2027 kArithmeticImm, &cont);
2028 }
2029 FlagsContinuation cont;
2030 VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
2031}
2032
2033
2034void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002035 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002036 FlagsContinuation cont(kOverflow, ovf);
2037 return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
2038 kArithmeticImm, &cont);
2039 }
2040 FlagsContinuation cont;
2041 VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
2042}
2043
2044
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002045void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2046 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2047 FlagsContinuation cont(kOverflow, ovf);
2048 return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
2049 &cont);
2050 }
2051 FlagsContinuation cont;
2052 VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
2053}
2054
2055
2056void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2057 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2058 FlagsContinuation cont(kOverflow, ovf);
2059 return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
2060 &cont);
2061 }
2062 FlagsContinuation cont;
2063 VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
2064}
2065
2066
// Int64LessThan: signed 64-bit compare, condition kSignedLessThan.
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2071
2072
// Int64LessThanOrEqual: signed 64-bit compare, condition
// kSignedLessThanOrEqual.
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2077
2078
// Uint64LessThan: unsigned 64-bit compare, condition kUnsignedLessThan.
void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2083
2084
// Uint64LessThanOrEqual: unsigned 64-bit compare, condition
// kUnsignedLessThanOrEqual.
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2089
2090
// Float32Equal: single-precision compare, condition kEqual.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2095
2096
// Float32LessThan: single-precision compare, condition kFloatLessThan.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
2101
2102
// Float32LessThanOrEqual: single-precision compare, condition
// kFloatLessThanOrEqual.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2107
2108
// Float64Equal: double-precision compare, condition kEqual.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2113
2114
// Float64LessThan: double-precision compare, condition kFloatLessThan.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
2119
2120
// Float64LessThanOrEqual: double-precision compare, condition
// kFloatLessThanOrEqual.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2125
2126
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002127void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
2128 Arm64OperandGenerator g(this);
2129 Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
2130 g.UseRegister(node->InputAt(0)));
2131}
2132
2133
2134void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
2135 Arm64OperandGenerator g(this);
2136 Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
2137 g.UseRegister(node->InputAt(0)));
2138}
2139
2140
// Float64InsertLowWord32: replaces the low 32 bits of a double. If the
// input is a covered Float64InsertHighWord32 (i.e. both halves get
// replaced), the original double is irrelevant: fuse into a Bfi that merges
// the two 32-bit words into one 64-bit register, then move that to the
// float result (kArm64Float64MoveU64).
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    // right = low word, right_of_left = high word inserted at bit 32.
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}
2157
2158
// Float64InsertHighWord32: replaces the high 32 bits of a double. Mirrors
// VisitFloat64InsertLowWord32: if the input is a covered
// Float64InsertLowWord32, fuse both insertions into a single Bfi plus a
// register move, bypassing the original double entirely.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    // right_of_left = low word, right = high word inserted at bit 32.
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}
2174
2175
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002176// static
2177MachineOperatorBuilder::Flags
2178InstructionSelector::SupportedMachineOperatorFlags() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002179 return MachineOperatorBuilder::kFloat32Max |
2180 MachineOperatorBuilder::kFloat32Min |
2181 MachineOperatorBuilder::kFloat32RoundDown |
2182 MachineOperatorBuilder::kFloat64Max |
2183 MachineOperatorBuilder::kFloat64Min |
2184 MachineOperatorBuilder::kFloat64RoundDown |
2185 MachineOperatorBuilder::kFloat32RoundUp |
2186 MachineOperatorBuilder::kFloat64RoundUp |
2187 MachineOperatorBuilder::kFloat32RoundTruncate |
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002188 MachineOperatorBuilder::kFloat64RoundTruncate |
2189 MachineOperatorBuilder::kFloat64RoundTiesAway |
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002190 MachineOperatorBuilder::kFloat32RoundTiesEven |
2191 MachineOperatorBuilder::kFloat64RoundTiesEven |
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002192 MachineOperatorBuilder::kWord32ShiftIsSafe |
2193 MachineOperatorBuilder::kInt32DivIsSafe |
2194 MachineOperatorBuilder::kUint32DivIsSafe;
2195}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002196
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002197} // namespace compiler
2198} // namespace internal
2199} // namespace v8