blob: 26a289613483065ac8dab4ccf3561743d4a26656 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/instruction-selector-impl.h"
6#include "src/compiler/node-matchers.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00007#include "src/compiler/node-properties.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +00008
9namespace v8 {
10namespace internal {
11namespace compiler {
12
// Classifies which immediate encodings an instruction operand may use; used
// by Arm64OperandGenerator::CanBeImmediate to decide between an immediate and
// a register operand.
enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,   // encodable as a 32-bit logical (bitmask) immediate
  kLogical64Imm,   // encodable as a 64-bit logical (bitmask) immediate
  kLoadStoreImm8,  // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate     // operand must always be a register
};
25
26
27// Adds Arm64-specific methods for generating operands.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000028class Arm64OperandGenerator final : public OperandGenerator {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000029 public:
30 explicit Arm64OperandGenerator(InstructionSelector* selector)
31 : OperandGenerator(selector) {}
32
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000033 InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000034 if (CanBeImmediate(node, mode)) {
35 return UseImmediate(node);
36 }
37 return UseRegister(node);
38 }
39
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000040 // Use the zero register if the node has the immediate value zero, otherwise
41 // assign a register.
42 InstructionOperand UseRegisterOrImmediateZero(Node* node) {
43 if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
44 return UseImmediate(node);
45 }
46 return UseRegister(node);
47 }
48
49 // Use the provided node if it has the required value, or create a
50 // TempImmediate otherwise.
51 InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
52 if (GetIntegerConstantValue(node) == value) {
53 return UseImmediate(node);
54 }
55 return TempImmediate(value);
56 }
57
58 bool IsIntegerConstant(Node* node) {
59 return (node->opcode() == IrOpcode::kInt32Constant) ||
60 (node->opcode() == IrOpcode::kInt64Constant);
61 }
62
63 int64_t GetIntegerConstantValue(Node* node) {
64 if (node->opcode() == IrOpcode::kInt32Constant) {
65 return OpParameter<int32_t>(node);
66 }
67 DCHECK(node->opcode() == IrOpcode::kInt64Constant);
68 return OpParameter<int64_t>(node);
69 }
70
Ben Murdochb8a8cc12014-11-26 15:28:44 +000071 bool CanBeImmediate(Node* node, ImmediateMode mode) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000072 return IsIntegerConstant(node) &&
73 CanBeImmediate(GetIntegerConstantValue(node), mode);
Emily Bernierd0a1eb72015-03-24 16:35:39 -040074 }
75
76 bool CanBeImmediate(int64_t value, ImmediateMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000077 unsigned ignored;
78 switch (mode) {
79 case kLogical32Imm:
80 // TODO(dcarney): some unencodable values can be handled by
81 // switching instructions.
82 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
83 &ignored, &ignored, &ignored);
84 case kLogical64Imm:
85 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
86 &ignored, &ignored, &ignored);
87 case kArithmeticImm:
Ben Murdochb8a8cc12014-11-26 15:28:44 +000088 return Assembler::IsImmAddSub(value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +000089 case kLoadStoreImm8:
90 return IsLoadStoreImmediate(value, LSByte);
91 case kLoadStoreImm16:
92 return IsLoadStoreImmediate(value, LSHalfword);
93 case kLoadStoreImm32:
94 return IsLoadStoreImmediate(value, LSWord);
95 case kLoadStoreImm64:
96 return IsLoadStoreImmediate(value, LSDoubleWord);
97 case kNoImmediate:
98 return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000099 case kShift32Imm: // Fall through.
100 case kShift64Imm:
101 // Shift operations only observe the bottom 5 or 6 bits of the value.
102 // All possible shifts can be encoded by discarding bits which have no
103 // effect.
104 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000105 }
106 return false;
107 }
108
109 private:
110 bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
111 return Assembler::IsImmLSScaled(value, size) ||
112 Assembler::IsImmLSUnscaled(value);
113 }
114};
115
116
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000117namespace {
118
119void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400120 Arm64OperandGenerator g(selector);
121 selector->Emit(opcode, g.DefineAsRegister(node),
122 g.UseRegister(node->InputAt(0)));
123}
124
125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000126void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000127 Arm64OperandGenerator g(selector);
128 selector->Emit(opcode, g.DefineAsRegister(node),
129 g.UseRegister(node->InputAt(0)),
130 g.UseRegister(node->InputAt(1)));
131}
132
133
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000134void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
135 ImmediateMode operand_mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000136 Arm64OperandGenerator g(selector);
137 selector->Emit(opcode, g.DefineAsRegister(node),
138 g.UseRegister(node->InputAt(0)),
139 g.UseOperand(node->InputAt(1), operand_mode));
140}
141
142
// If |input_node| is a shift of a register by an integer constant and is only
// used by |node|, folds the shift into |opcode| as a shifted-register operand
// (LSL/LSR/ASR, plus ROR when |try_ror| is set) and returns true. The caller
// then emits the shift's own inputs instead of |input_node|.
bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                      Node* input_node, InstructionCode* opcode, bool try_ror) {
  Arm64OperandGenerator g(selector);

  // The shift must be covered (used only by |node|) so it can be absorbed,
  // and its shift amount must be a constant to encode as an immediate.
  if (!selector->CanCover(node, input_node)) return false;
  if (input_node->InputCount() != 2) return false;
  if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;

  switch (input_node->opcode()) {
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord64Shl:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
      return true;
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord64Shr:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
      return true;
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord64Sar:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
      return true;
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord64Ror:
      // ROR is only available for some consumers (e.g. not for add/sub, see
      // the !is_add_sub argument at the call sites in VisitBinop).
      if (try_ror) {
        *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
        return true;
      }
      return false;
    default:
      return false;
  }
}
175
176
// Tries to fold a zero- or sign-extension of |right_node| into |opcode| as an
// extended-register operand. On success, fills in |left_op|/|right_op| with
// the operands to use and returns true.
//   And(x, 0xff)               -> UXTB,  And(x, 0xffff)              -> UXTH
//   Sar(Shl(x, 24), 24)        -> SXTB,  Sar(Shl(x, 16), 16)         -> SXTH
bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
                       Node* node, Node* left_node, Node* right_node,
                       InstructionOperand* left_op,
                       InstructionOperand* right_op, InstructionCode* opcode) {
  // The extend pattern must have no other uses, or it still has to be
  // computed separately and folding would not pay off.
  if (!selector->CanCover(node, right_node)) return false;

  NodeMatcher nm(right_node);

  if (nm.IsWord32And()) {
    // Masking with 0xff/0xffff is a zero-extension of the low 8/16 bits.
    Int32BinopMatcher mright(right_node);
    if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
      int32_t mask = mright.right().Value();
      *left_op = g->UseRegister(left_node);
      *right_op = g->UseRegister(mright.left().node());
      *opcode |= AddressingModeField::encode(
          (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
      return true;
    }
  } else if (nm.IsWord32Sar()) {
    // Shl followed by Sar with the same amount (16 or 24) sign-extends the
    // low 16 or 8 bits, respectively.
    Int32BinopMatcher mright(right_node);
    if (selector->CanCover(mright.node(), mright.left().node()) &&
        mright.left().IsWord32Shl()) {
      Int32BinopMatcher mleft_of_right(mright.left().node());
      if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
          (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
        int32_t shift = mright.right().Value();
        *left_op = g->UseRegister(left_node);
        *right_op = g->UseRegister(mleft_of_right.left().node());
        *opcode |= AddressingModeField::encode(
            (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
        return true;
      }
    }
  }
  return false;
}
213
214
// Shared routine for multiple binary operations.
// Packs the operands of |node| into an instruction, trying (in order):
// an immediate right operand, a commuted immediate (cmp/cmn only), an
// extended-register operand, a shifted-register operand, and finally a plain
// register pair. Branch labels and a flag-set result are appended from
// |cont| as needed.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;
  bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);

  // We can commute cmp by switching the inputs and commuting the flags
  // continuation.
  bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;

  // The cmp and cmn instructions are encoded as sub or add with zero output
  // register, and therefore support the same operand modes.
  bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
                    m.IsInt64Sub() || is_cmp;

  Node* left_node = m.left().node();
  Node* right_node = m.right().node();

  if (g.CanBeImmediate(right_node, operand_mode)) {
    // Right operand fits the immediate encoding directly.
    inputs[input_count++] = g.UseRegister(left_node);
    inputs[input_count++] = g.UseImmediate(right_node);
  } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
    // Swap the comparison so the immediate ends up on the right.
    cont->Commute();
    inputs[input_count++] = g.UseRegister(right_node);
    inputs[input_count++] = g.UseImmediate(left_node);
  } else if (is_add_sub &&
             TryMatchAnyExtend(&g, selector, node, left_node, right_node,
                               &inputs[0], &inputs[1], &opcode)) {
    // TryMatchAnyExtend filled inputs[0..1] with an extended-register form.
    input_count += 2;
  } else if (is_add_sub && can_commute &&
             TryMatchAnyExtend(&g, selector, node, right_node, left_node,
                               &inputs[0], &inputs[1], &opcode)) {
    if (is_cmp) cont->Commute();
    input_count += 2;
  } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                              !is_add_sub)) {
    // Fold the right operand's shift into the instruction; the shifted value
    // and the shift amount become separate inputs.
    Matcher m_shift(right_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                             !is_add_sub)) {
    if (is_cmp) cont->Commute();
    Matcher m_shift(left_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
  } else {
    // Plain register/register form.
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(right_node);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  // cmp/cmn produce only flags, so there is no value output.
  if (!is_cmp) {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK((output_count != 0) || is_cmp);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
                 inputs);
}
295
296
297// Shared routine for multiple binary operations.
298template <typename Matcher>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000299void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
300 ImmediateMode operand_mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000301 FlagsContinuation cont;
302 VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
303}
304
305
// Shared routine for add/sub: when the right operand is a negative constant
// whose negation fits the arithmetic-immediate encoding, emits the opposite
// operation (|negate_opcode|) with the negated immediate instead.
template <typename Matcher>
void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                 ArchOpcode negate_opcode) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (m.right().HasValue() && (m.right().Value() < 0) &&
      g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
    // NOTE(review): -m.right().Value() overflows for the minimum
    // representable value; presumably such values never reach here or are
    // rejected by CanBeImmediate — confirm.
    selector->Emit(negate_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()),
                   g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
  }
}
320
321
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000322// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
323// return the value of k, otherwise return zero. This is used to reduce the
324// multiplication to addition with left shift: x + (x << k).
325template <typename Matcher>
326int32_t LeftShiftForReducedMultiply(Matcher* m) {
327 DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
328 if (m->right().HasValue() && m->right().Value() >= 3) {
329 uint64_t value_minus_one = m->right().Value() - 1;
330 if (base::bits::IsPowerOfTwo64(value_minus_one)) {
331 return WhichPowerOf2_64(value_minus_one);
332 }
333 }
334 return 0;
335}
336
337} // namespace
338
339
// Selects an ARM64 load instruction for |node|, choosing the opcode and the
// admissible immediate-offset encoding from the machine representation, and
// using a register offset when the index does not fit as an immediate.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Signedness selects between sign- and zero-extending byte loads.
      opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    // Base register + immediate offset.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Base register + index register.
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}
387
388
// Selects an ARM64 store for |node|. Stores that need a GC write barrier are
// emitted as kArchStoreWithWriteBarrier with unique registers (the record-
// write code clobbers them); plain stores pick an opcode and immediate mode
// from the representation, mirroring VisitLoad.
void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(arm64): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    // Only tagged values can need a write barrier.
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
    // must check kArithmeticImm as well as kLoadStoreImm64.
    if (g.CanBeImmediate(index, kArithmeticImm) &&
        g.CanBeImmediate(index, kLoadStoreImm64)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MRR;
    }
    // Map writes never alias the value register, so a non-unique register is
    // sufficient for them.
    inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                ? g.UseRegister(value)
                                : g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    ImmediateMode immediate_mode = kNoImmediate;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kArm64StrS;
        immediate_mode = kLoadStoreImm32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kArm64StrD;
        immediate_mode = kLoadStoreImm64;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kArm64Strb;
        immediate_mode = kLoadStoreImm8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kArm64Strh;
        immediate_mode = kLoadStoreImm16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kArm64StrW;
        immediate_mode = kLoadStoreImm32;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kArm64Str;
        immediate_mode = kLoadStoreImm64;
        break;
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }
    if (g.CanBeImmediate(index, immediate_mode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
    } else {
      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
           g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
    }
  }
}
484
485
// Selects a checked (bounds-guarded) load: the emitted instruction receives
// buffer, offset and length operands and the representation-specific
// kCheckedLoad* opcode.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // The length may be an arithmetic immediate — presumably because the
  // generated code compares the offset against it; confirm in the code
  // generator.
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
       g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}
522
523
// Selects a checked (bounds-guarded) store: the emitted instruction receives
// buffer, offset, length and value operands and the representation-specific
// kCheckedStore* opcode. No output is produced.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  Arm64OperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  // Same kArithmeticImm choice for the length as in VisitCheckedLoad.
  Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
       g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
}
561
562
// Shared routine for logical operations (and/or/xor). Recognizes an inverted
// input — Xor(x, -1) — and selects the corresponding inverted-operand
// instruction (BIC/ORN/EON), and selects MVN (Not) for a whole-node
// Xor(x, -1); otherwise defers to VisitBinop.
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32:
      inv_opcode = kArm64Bic32;
      break;
    case kArm64And:
      inv_opcode = kArm64Bic;
      break;
    case kArm64Or32:
      inv_opcode = kArm64Orn32;
      break;
    case kArm64Or:
      inv_opcode = kArm64Orn;
      break;
    case kArm64Eor32:
      inv_opcode = kArm64Eon32;
      break;
    case kArm64Eor:
      inv_opcode = kArm64Eon;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      // TODO(all): support shifted operand on right.
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  // The whole node is a bitwise negation: select Not (MVN).
  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}
629
630
// Tries to select Ubfx (unsigned bitfield extract) for And(Shr(x, imm), mask)
// where the mask is contiguous in the least-significant bits; otherwise falls
// back to the shared logical-operation selection.
void InstructionSelector::VisitWord32And(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ubfx cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(mleft.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}
669
670
// 64-bit variant of VisitWord32And: tries to select Ubfx for
// And(Shr(x, imm), mask) with a contiguous low-bit mask; otherwise falls back
// to the shared logical-operation selection.
void InstructionSelector::VisitWord64And(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint64_t mask_width = base::bits::CountPopulation64(mask);
    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);

        // Ubfx cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use ubfx with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(mleft.right().node(), lsb),
             g.TempImmediate(static_cast<int32_t>(mask_width)));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}
709
710
711void InstructionSelector::VisitWord32Or(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400712 Int32BinopMatcher m(node);
713 VisitLogical<Int32BinopMatcher>(
714 this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
715 CanCover(node, m.right().node()), kLogical32Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000716}
717
718
719void InstructionSelector::VisitWord64Or(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400720 Int64BinopMatcher m(node);
721 VisitLogical<Int64BinopMatcher>(
722 this, node, &m, kArm64Or, CanCover(node, m.left().node()),
723 CanCover(node, m.right().node()), kLogical64Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000724}
725
726
727void InstructionSelector::VisitWord32Xor(Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000728 Int32BinopMatcher m(node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400729 VisitLogical<Int32BinopMatcher>(
730 this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
731 CanCover(node, m.right().node()), kLogical32Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000732}
733
734
735void InstructionSelector::VisitWord64Xor(Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000736 Int64BinopMatcher m(node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400737 VisitLogical<Int64BinopMatcher>(
738 this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
739 CanCover(node, m.right().node()), kLogical64Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000740}
741
742
// Word32Shl: 32-bit shift left. Recognizes Shl(And(x, mask), imm) with a
// contiguous low mask and folds it into either a plain Lsl (when the mask
// bits are all shifted out of the top) or a Ubfiz bitfield-insert-in-zero.
// Falls back to a generic register/immediate Lsl otherwise.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Arm64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      // mask_msb + mask_width == 32 with a non-zero popcount means the mask
      // is a contiguous run of ones starting at bit 0 (checked by the DCHECK
      // on trailing zeros below).
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kArm64Lsl32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        } else {
          // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
          // contiguous, and the shift immediate non-zero.
          Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
          return;
        }
      }
    }
  }
  // Generic case: left-shift by register or 0-31 immediate.
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
778
779
780void InstructionSelector::VisitWord64Shl(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400781 Arm64OperandGenerator g(this);
782 Int64BinopMatcher m(node);
783 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
784 m.right().IsInRange(32, 63)) {
785 // There's no need to sign/zero-extend to 64-bit if we shift out the upper
786 // 32 bits anyway.
787 Emit(kArm64Lsl, g.DefineAsRegister(node),
788 g.UseRegister(m.left().node()->InputAt(0)),
789 g.UseImmediate(m.right().node()));
790 return;
791 }
792 VisitRRO(this, kArm64Lsl, node, kShift64Imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000793}
794
795
namespace {

// Tries to match Shr/Sar(Shl(x, K), K) with identical effective shift
// amounts and emit a single signed/unsigned bitfield extract (Sbfx/Ubfx)
// of the low (32 - K) bits instead of two shifts.
// Returns true iff an instruction was emitted.
bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
  Arm64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
    // OP is >>> or >> and (K & 0x1f) != 0.
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && m.right().HasValue() &&
        (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
      DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
      // Sar keeps the sign, so it maps to the signed extract.
      ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;

      int right_val = m.right().Value() & 0x1f;
      DCHECK_NE(right_val, 0);

      // Extract (32 - K) bits starting at bit 0 of the original value.
      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(mleft.left().node()), g.TempImmediate(0),
                     g.TempImmediate(32 - right_val));
      return true;
    }
  }
  return false;
}

}  // namespace
823
824
// Word32Shr: 32-bit logical shift right. Tries, in order:
//  1. Shr(And(x, mask), imm) with a contiguous mask -> Ubfx.
//  2. Shr(Shl(x, K), K) -> bitfield extract (only tried when case 1's
//     guard did not match, preserving the original else-if pairing).
//  3. Shr(Uint32MulHigh(a, b), imm) -> Umull + single combined Lsr.
//  4. Generic Lsr.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // Clearing the low lsb bits of the mask is free: the shift discards
      // them anyway.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  } else if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Uint32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1f;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    // The high word sits 32 bits up in the 64-bit product, hence 32 + shift.
    Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
867
868
// Word64Shr: 64-bit logical shift right. Recognizes Shr(And(x, mask), imm)
// where the surviving mask bits form one contiguous run ending at the top of
// the shifted-out region, and selects a single Ubfx; otherwise a plain Lsr.
void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3f;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      // The low lsb bits of the mask are irrelevant: the shift discards them.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation64(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}
893
894
// Word32Sar: 32-bit arithmetic shift right. Tries, in order:
//  1. Sar(Shl(x, K), K) -> signed bitfield extract (Sbfx).
//  2. Sar(Int32MulHigh(a, b), imm) -> Smull + single combined Asr.
//  3. Sar(Add(a, Int32MulHigh(x, y)), imm) -> Smull + Add-with-ASR + Asr,
//     keeping the possibly-33-bit intermediate sum correct.
//  4. Generic Asr.
void InstructionSelector::VisitWord32Sar(Node* node) {
  if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  Int32BinopMatcher m(node);
  if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Int32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1f;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    // The high word sits 32 bits up in the 64-bit product, hence 32 + shift.
    Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  if (m.left().IsInt32Add() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    Node* add_node = m.left().node();
    Int32BinopMatcher madd_node(add_node);
    if (madd_node.left().IsInt32MulHigh() &&
        CanCover(add_node, madd_node.left().node())) {
      // Combine the shift that would be generated by Int32MulHigh with the add
      // on the left of this Sar operation. We do it here, as the result of the
      // add potentially has 33 bits, so we have to ensure the result is
      // truncated by being the input to this 32-bit Sar operation.
      Arm64OperandGenerator g(this);
      Node* mul_node = madd_node.left().node();

      InstructionOperand const smull_operand = g.TempRegister();
      Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
           g.UseRegister(mul_node->InputAt(1)));

      // 64-bit add of the other addend with the product shifted down by 32
      // (the MulHigh part) via the operand-2 ASR addressing mode.
      InstructionOperand const add_operand = g.TempRegister();
      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
           add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
           g.TempImmediate(32));

      // Final 32-bit Asr performs both this node's shift and the truncation.
      Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
           g.UseImmediate(node->InputAt(1)));
      return;
    }
  }

  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
946
947
// 64-bit arithmetic shift right: register or 0-63 immediate shift.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Asr, node, kShift64Imm);
}


// 32-bit rotate right: register or 0-31 immediate rotation.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


// 64-bit rotate right: register or 0-63 immediate rotation.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
961
962
// Count leading zeros, 64-bit.
void InstructionSelector::VisitWord64Clz(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


// Count leading zeros, 32-bit.
void InstructionSelector::VisitWord32Clz(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


// Count-trailing-zeros is never selected on this target.
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }


// Bit reversal, 32-bit (Rbit).
void InstructionSelector::VisitWord32ReverseBits(Node* node) {
  VisitRR(this, kArm64Rbit32, node);
}


// Bit reversal, 64-bit (Rbit).
void InstructionSelector::VisitWord64ReverseBits(Node* node) {
  VisitRR(this, kArm64Rbit, node);
}


// Population count is never selected on this target.
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
995
996
// Int32Add: folds a multiply on either side into a single Madd (multiply-add)
// when the multiply cannot instead be strength-reduced to add-with-shift;
// otherwise defers to the generic add/sub selector.
void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}
1026
1027
// Int64Add: 64-bit counterpart of VisitInt32Add — folds a coverable multiply
// on either side into Madd unless it would be reduced to add-with-shift.
void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}
1057
1058
// Int32Sub: folds Sub(a, Mul(x, y)) into a single Msub (multiply-subtract)
// when the multiply cannot be strength-reduced to add-with-shift; otherwise
// defers to the generic add/sub selector (note the swapped opcode pair for
// the negated-immediate case handled there).
void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
1078
1079
// Int64Sub: 64-bit counterpart of VisitInt32Sub — folds Sub(a, Mul(x, y))
// into Msub unless the multiply would be reduced to add-with-shift.
void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
1099
1100
// Int32Mul: 32-bit multiply with three strength reductions:
//  1. x * (2^k + 1) -> Add(x, x << k) via the shifted-operand addressing mode.
//  2. Mul(Sub(0, x), y) or Mul(x, Sub(0, y)) -> Mneg (multiply-negate).
//  3. Generic Mul otherwise.
void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}
1141
1142
// Int64Mul: 64-bit counterpart of VisitInt32Mul — same three reductions
// (add-with-shift for 2^k + 1, Mneg for a negated factor, generic Mul).
void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift.
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}
1182
1183
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001184void InstructionSelector::VisitInt32MulHigh(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001185 Arm64OperandGenerator g(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001186 InstructionOperand const smull_operand = g.TempRegister();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001187 Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
1188 g.UseRegister(node->InputAt(1)));
1189 Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
1190}
1191
1192
1193void InstructionSelector::VisitUint32MulHigh(Node* node) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001194 Arm64OperandGenerator g(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001195 InstructionOperand const smull_operand = g.TempRegister();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001196 Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
1197 g.UseRegister(node->InputAt(1)));
1198 Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
1199}
1200
1201
// Signed 32-bit division (Sdiv).
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


// Signed 64-bit division (Sdiv).
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


// Unsigned 32-bit division (Udiv).
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


// Unsigned 64-bit division (Udiv).
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


// Signed 32-bit modulus.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


// Signed 64-bit modulus.
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


// Unsigned 32-bit modulus.
void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


// Unsigned 64-bit modulus.
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}
1240
1241
// float32 -> float64 widening conversion.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kArm64Float32ToFloat64, node);
}


// Signed int32 -> float32 conversion.
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kArm64Int32ToFloat32, node);
}


// Unsigned int32 -> float32 conversion.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kArm64Uint32ToFloat32, node);
}


// Signed int32 -> float64 conversion.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kArm64Int32ToFloat64, node);
}


// Unsigned int32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint32ToFloat64, node);
}


// float32 -> signed int32 truncation.
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kArm64Float32ToInt32, node);
}


// float64 -> signed int32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ToInt32, node);
}


// float32 -> unsigned int32 truncation.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kArm64Float32ToUint32, node);
}


// float64 -> unsigned int32 conversion.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kArm64Float64ToUint32, node);
}
1285
1286
1287void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001288 Arm64OperandGenerator g(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001289
1290 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1291 InstructionOperand outputs[2];
1292 size_t output_count = 0;
1293 outputs[output_count++] = g.DefineAsRegister(node);
1294
1295 Node* success_output = NodeProperties::FindProjection(node, 1);
1296 if (success_output) {
1297 outputs[output_count++] = g.DefineAsRegister(success_output);
1298 }
1299
1300 Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
1301}
1302
1303
1304void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1305 Arm64OperandGenerator g(this);
1306
1307 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1308 InstructionOperand outputs[2];
1309 size_t output_count = 0;
1310 outputs[output_count++] = g.DefineAsRegister(node);
1311
1312 Node* success_output = NodeProperties::FindProjection(node, 1);
1313 if (success_output) {
1314 outputs[output_count++] = g.DefineAsRegister(success_output);
1315 }
1316
1317 Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
1318}
1319
1320
1321void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1322 Arm64OperandGenerator g(this);
1323
1324 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1325 InstructionOperand outputs[2];
1326 size_t output_count = 0;
1327 outputs[output_count++] = g.DefineAsRegister(node);
1328
1329 Node* success_output = NodeProperties::FindProjection(node, 1);
1330 if (success_output) {
1331 outputs[output_count++] = g.DefineAsRegister(success_output);
1332 }
1333
1334 Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
1335}
1336
1337
1338void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1339 Arm64OperandGenerator g(this);
1340
1341 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1342 InstructionOperand outputs[2];
1343 size_t output_count = 0;
1344 outputs[output_count++] = g.DefineAsRegister(node);
1345
1346 Node* success_output = NodeProperties::FindProjection(node, 1);
1347 if (success_output) {
1348 outputs[output_count++] = g.DefineAsRegister(success_output);
1349 }
1350
1351 Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001352}
1353
1354
// int32 -> int64 sign extension (Sxtw).
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  VisitRR(this, kArm64Sxtw, node);
}
1358
1359
// uint32 -> uint64 zero extension. For inputs from the listed 32-bit
// operations the extension is free (see the comment in the switch) and only
// a nop is emitted; otherwise an explicit 32-bit move clears the high bits.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register (implicitly
      // clearing the top 32-bit of the corresponding X register) so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    default:
      break;
  }
  // Explicit zero-extending 32-bit move for values of unknown provenance.
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
1398
1399
// float64 -> float32 narrowing conversion.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64ToFloat32, node);
}


// float64 -> int32 truncation. JavaScript semantics need the generic
// arch-level lowering; plain round-to-zero maps directly to the fp convert.
void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  switch (TruncationModeOf(node->op())) {
    case TruncationMode::kJavaScript:
      return VisitRR(this, kArchTruncateDoubleToI, node);
    case TruncationMode::kRoundToZero:
      return VisitRR(this, kArm64Float64ToInt32, node);
  }
  UNREACHABLE();
}
1414
1415
// int64 -> int32 truncation. If the input is a coverable 64-bit right shift
// whose low 32 result bits can be produced by a single Lsr, fold the shift
// and the truncation together; otherwise emit a 32-bit move.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value) && value->InputCount() >= 2) {
    Int64BinopMatcher m(value);
    // Sar is only foldable at exactly 32: for larger shifts the sign bits
    // reaching the low word would differ from Lsr's zeros. For Shr any
    // shift in [32, 63] yields the same low 32 bits as Lsr.
    if ((m.IsWord64Sar() && m.right().HasValue() &&
         (m.right().Value() == 32)) ||
        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseImmediate(m.right().node()));
      return;
    }
  }

  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
1432
1433
// Signed int64 -> float32 conversion.
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kArm64Int64ToFloat32, node);
}


// Signed int64 -> float64 conversion.
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Int64ToFloat64, node);
}


// Unsigned int64 -> float32 conversion.
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat32, node);
}


// Unsigned int64 -> float64 conversion.
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kArm64Uint64ToFloat64, node);
}


// Reinterpret float32 bits as int32 (extracts the low word of the fp reg).
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kArm64Float64ExtractLowWord32, node);
}


// Reinterpret float64 bits as int64.
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kArm64U64MoveFloat64, node);
}


// Reinterpret int32 bits as float32 (via the general->fp move).
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


// Reinterpret int64 bits as float64.
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kArm64Float64MoveU64, node);
}


// float32 addition.
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kArm64Float32Add, node);
}


// float64 addition.
void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kArm64Float64Add, node);
}


// float32 subtraction.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kArm64Float32Sub, node);
}
1487
1488
// Float64Sub: recognizes two patterns built around a -0.0 left operand:
//  1. -0.0 - RoundDown(-0.0 - x)  ->  RoundUp(x)   (ceil via negated floor,
//     valid only with -0.0, which preserves the sign of zero results)
//  2. -0.0 - x                    ->  Neg(x)
// Anything else is a plain fp subtract.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          // Matched -0.0 - RoundDown(-0.0 - x): emit a single round-up.
          Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
               g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    // Matched -0.0 - x: emit an fp negate.
    Emit(kArm64Float64Neg, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitRRR(this, kArm64Float64Sub, node);
}
1511
1512
1513void InstructionSelector::VisitFloat32Mul(Node* node) {
1514 VisitRRR(this, kArm64Float32Mul, node);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001515}
1516
1517
// Double-precision multiply: two register inputs, one register output.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArm64Float64Mul, node);
}
1521
1522
// Single-precision divide: two register inputs, one register output.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArm64Float32Div, node);
}
1526
1527
// Double-precision divide: two register inputs, one register output.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArm64Float64Div, node);
}
1531
1532
// Float64 modulus has no single ARM64 instruction; it is lowered as a call
// (MarkAsCall) with both inputs and the result pinned to the runtime's
// expected FP argument/return registers d0 and d1.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
1539
1540
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001541void InstructionSelector::VisitFloat32Max(Node* node) {
1542 VisitRRR(this, kArm64Float32Max, node);
1543}
1544
1545
// Double-precision maximum: two register inputs, one register output.
void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kArm64Float64Max, node);
}
1549
1550
// Single-precision minimum: two register inputs, one register output.
void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kArm64Float32Min, node);
}
1554
1555
// Double-precision minimum: two register inputs, one register output.
void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kArm64Float64Min, node);
}
1559
1560
// Single-precision absolute value: one register input, one register output.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kArm64Float32Abs, node);
}
1564
1565
// Double-precision absolute value: one register input, one register output.
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kArm64Float64Abs, node);
}
1569
1570
// Single-precision square root: one register input, one register output.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kArm64Float32Sqrt, node);
}
1574
1575
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001576void InstructionSelector::VisitFloat64Sqrt(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001577 VisitRR(this, kArm64Float64Sqrt, node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001578}
1579
1580
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001581void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1582 VisitRR(this, kArm64Float32RoundDown, node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001583}
1584
1585
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001586void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1587 VisitRR(this, kArm64Float64RoundDown, node);
1588}
1589
1590
// Float32 round toward positive infinity (ceil).
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kArm64Float32RoundUp, node);
}
1594
1595
// Float64 round toward positive infinity (ceil).
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kArm64Float64RoundUp, node);
}
1599
1600
// Float32 round toward zero (truncate).
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float32RoundTruncate, node);
}
1604
1605
// Float64 round toward zero (truncate).
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kArm64Float64RoundTruncate, node);
}
1609
1610
// Float64 round to nearest, ties away from zero.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesAway, node);
}
1614
1615
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001616void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
1617 VisitRR(this, kArm64Float32RoundTiesEven, node);
1618}
1619
1620
// Float64 round to nearest, ties to even.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kArm64Float64RoundTiesEven, node);
}
1624
1625
1626void InstructionSelector::EmitPrepareArguments(
1627 ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1628 Node* node) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001629 Arm64OperandGenerator g(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001630
Ben Murdoch097c5b22016-05-18 11:27:45 +01001631 bool to_native_stack = descriptor->UseNativeStack();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001632
Ben Murdoch097c5b22016-05-18 11:27:45 +01001633 int claim_count = static_cast<int>(arguments->size());
1634 int slot = claim_count - 1;
1635 if (to_native_stack) {
1636 // Native stack must always be aligned to 16 (2 words).
1637 claim_count = RoundUp(claim_count, 2);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001638 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001639 // TODO(titzer): claim and poke probably take small immediates.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001640 // Bump the stack pointer(s).
Ben Murdoch097c5b22016-05-18 11:27:45 +01001641 if (claim_count > 0) {
1642 // TODO(titzer): it would be better to bump the csp here only
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001643 // and emit paired stores with increment for non c frames.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001644 ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
1645 Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001646 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001647
Ben Murdoch097c5b22016-05-18 11:27:45 +01001648 // Poke the arguments into the stack.
1649 ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001650 while (slot >= 0) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001651 Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001652 g.TempImmediate(slot));
1653 slot--;
1654 // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
1655 // same type.
1656 // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
1657 // g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
1658 // slot -= 2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001659 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001660}
1661
1662
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001663bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1664
1665
1666namespace {
1667
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001668// Shared routine for multiple compare operations.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001669void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1670 InstructionOperand left, InstructionOperand right,
1671 FlagsContinuation* cont) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001672 Arm64OperandGenerator g(selector);
1673 opcode = cont->Encode(opcode);
1674 if (cont->IsBranch()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001675 selector->Emit(opcode, g.NoOutput(), left, right,
1676 g.Label(cont->true_block()), g.Label(cont->false_block()));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001677 } else {
1678 DCHECK(cont->IsSet());
1679 selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001680 }
1681}
1682
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001683
1684// Shared routine for multiple word compare operations.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001685void VisitWordCompare(InstructionSelector* selector, Node* node,
1686 InstructionCode opcode, FlagsContinuation* cont,
1687 bool commutative, ImmediateMode immediate_mode) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001688 Arm64OperandGenerator g(selector);
1689 Node* left = node->InputAt(0);
1690 Node* right = node->InputAt(1);
1691
1692 // Match immediates on left or right side of comparison.
1693 if (g.CanBeImmediate(right, immediate_mode)) {
1694 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1695 cont);
1696 } else if (g.CanBeImmediate(left, immediate_mode)) {
1697 if (!commutative) cont->Commute();
1698 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1699 cont);
1700 } else {
1701 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
1702 cont);
1703 }
1704}
1705
1706
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001707void VisitWord32Compare(InstructionSelector* selector, Node* node,
1708 FlagsContinuation* cont) {
1709 Int32BinopMatcher m(node);
1710 ArchOpcode opcode = kArm64Cmp32;
1711
1712 // Select negated compare for comparisons with negated right input.
1713 if (m.right().IsInt32Sub()) {
1714 Node* sub = m.right().node();
1715 Int32BinopMatcher msub(sub);
1716 if (msub.left().Is(0)) {
1717 bool can_cover = selector->CanCover(node, sub);
1718 node->ReplaceInput(1, msub.right().node());
1719 // Even if the comparison node covers the subtraction, after the input
1720 // replacement above, the node still won't cover the input to the
1721 // subtraction; the subtraction still uses it.
1722 // In order to get shifted operations to work, we must remove the rhs
1723 // input to the subtraction, as TryMatchAnyShift requires this node to
1724 // cover the input shift. We do this by setting it to the lhs input,
1725 // as we know it's zero, and the result of the subtraction isn't used by
1726 // any other node.
1727 if (can_cover) sub->ReplaceInput(1, msub.left().node());
1728 opcode = kArm64Cmn32;
1729 }
1730 }
1731 VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001732}
1733
1734
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001735void VisitWordTest(InstructionSelector* selector, Node* node,
1736 InstructionCode opcode, FlagsContinuation* cont) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001737 Arm64OperandGenerator g(selector);
1738 VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
1739 cont);
1740}
1741
1742
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001743void VisitWord32Test(InstructionSelector* selector, Node* node,
1744 FlagsContinuation* cont) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001745 VisitWordTest(selector, node, kArm64Tst32, cont);
1746}
1747
1748
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001749void VisitWord64Test(InstructionSelector* selector, Node* node,
1750 FlagsContinuation* cont) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001751 VisitWordTest(selector, node, kArm64Tst, cont);
1752}
1753
1754
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001755// Shared routine for multiple float32 compare operations.
1756void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1757 FlagsContinuation* cont) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001758 Arm64OperandGenerator g(selector);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001759 Float32BinopMatcher m(node);
1760 if (m.right().Is(0.0f)) {
1761 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
1762 g.UseImmediate(m.right().node()), cont);
1763 } else if (m.left().Is(0.0f)) {
1764 cont->Commute();
1765 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
1766 g.UseImmediate(m.left().node()), cont);
1767 } else {
1768 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
1769 g.UseRegister(m.right().node()), cont);
1770 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001771}
1772
1773
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001774// Shared routine for multiple float64 compare operations.
1775void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1776 FlagsContinuation* cont) {
1777 Arm64OperandGenerator g(selector);
1778 Float64BinopMatcher m(node);
1779 if (m.right().Is(0.0)) {
1780 VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
1781 g.UseImmediate(m.right().node()), cont);
1782 } else if (m.left().Is(0.0)) {
1783 cont->Commute();
1784 VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
1785 g.UseImmediate(m.left().node()), cont);
1786 } else {
1787 VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
1788 g.UseRegister(m.right().node()), cont);
1789 }
1790}
1791
1792} // namespace
1793
1794
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001795void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1796 BasicBlock* fbranch) {
1797 OperandGenerator g(this);
1798 Node* user = branch;
1799 Node* value = branch->InputAt(0);
1800
1801 FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1802
1803 // Try to combine with comparisons against 0 by simply inverting the branch.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001804 while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
1805 Int32BinopMatcher m(value);
1806 if (m.right().Is(0)) {
1807 user = value;
1808 value = m.left().node();
1809 cont.Negate();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001810 } else {
1811 break;
1812 }
1813 }
1814
1815 // Try to combine the branch with a comparison.
1816 if (CanCover(user, value)) {
1817 switch (value->opcode()) {
1818 case IrOpcode::kWord32Equal:
1819 cont.OverwriteAndNegateIfEqual(kEqual);
1820 return VisitWord32Compare(this, value, &cont);
1821 case IrOpcode::kInt32LessThan:
1822 cont.OverwriteAndNegateIfEqual(kSignedLessThan);
1823 return VisitWord32Compare(this, value, &cont);
1824 case IrOpcode::kInt32LessThanOrEqual:
1825 cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1826 return VisitWord32Compare(this, value, &cont);
1827 case IrOpcode::kUint32LessThan:
1828 cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
1829 return VisitWord32Compare(this, value, &cont);
1830 case IrOpcode::kUint32LessThanOrEqual:
1831 cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1832 return VisitWord32Compare(this, value, &cont);
1833 case IrOpcode::kWord64Equal:
1834 cont.OverwriteAndNegateIfEqual(kEqual);
1835 return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
1836 kArithmeticImm);
1837 case IrOpcode::kInt64LessThan:
1838 cont.OverwriteAndNegateIfEqual(kSignedLessThan);
1839 return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
1840 kArithmeticImm);
1841 case IrOpcode::kInt64LessThanOrEqual:
1842 cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1843 return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
1844 kArithmeticImm);
1845 case IrOpcode::kUint64LessThan:
1846 cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
1847 return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
1848 kArithmeticImm);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001849 case IrOpcode::kUint64LessThanOrEqual:
1850 cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1851 return VisitWordCompare(this, value, kArm64Cmp, &cont, false,
1852 kArithmeticImm);
1853 case IrOpcode::kFloat32Equal:
1854 cont.OverwriteAndNegateIfEqual(kEqual);
1855 return VisitFloat32Compare(this, value, &cont);
1856 case IrOpcode::kFloat32LessThan:
1857 cont.OverwriteAndNegateIfEqual(kFloatLessThan);
1858 return VisitFloat32Compare(this, value, &cont);
1859 case IrOpcode::kFloat32LessThanOrEqual:
1860 cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1861 return VisitFloat32Compare(this, value, &cont);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001862 case IrOpcode::kFloat64Equal:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001863 cont.OverwriteAndNegateIfEqual(kEqual);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001864 return VisitFloat64Compare(this, value, &cont);
1865 case IrOpcode::kFloat64LessThan:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001866 cont.OverwriteAndNegateIfEqual(kFloatLessThan);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001867 return VisitFloat64Compare(this, value, &cont);
1868 case IrOpcode::kFloat64LessThanOrEqual:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001869 cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001870 return VisitFloat64Compare(this, value, &cont);
1871 case IrOpcode::kProjection:
1872 // Check if this is the overflow output projection of an
1873 // <Operation>WithOverflow node.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001874 if (ProjectionIndexOf(value->op()) == 1u) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001875 // We cannot combine the <Operation>WithOverflow with this branch
1876 // unless the 0th projection (the use of the actual value of the
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001877 // <Operation> is either nullptr, which means there's no use of the
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001878 // actual value, or was already defined, which means it is scheduled
1879 // *AFTER* this branch).
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001880 Node* const node = value->InputAt(0);
1881 Node* const result = NodeProperties::FindProjection(node, 0);
1882 if (result == nullptr || IsDefined(result)) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001883 switch (node->opcode()) {
1884 case IrOpcode::kInt32AddWithOverflow:
1885 cont.OverwriteAndNegateIfEqual(kOverflow);
1886 return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
1887 kArithmeticImm, &cont);
1888 case IrOpcode::kInt32SubWithOverflow:
1889 cont.OverwriteAndNegateIfEqual(kOverflow);
1890 return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
1891 kArithmeticImm, &cont);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001892 case IrOpcode::kInt64AddWithOverflow:
1893 cont.OverwriteAndNegateIfEqual(kOverflow);
1894 return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
1895 kArithmeticImm, &cont);
1896 case IrOpcode::kInt64SubWithOverflow:
1897 cont.OverwriteAndNegateIfEqual(kOverflow);
1898 return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
1899 kArithmeticImm, &cont);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001900 default:
1901 break;
1902 }
1903 }
1904 }
1905 break;
1906 case IrOpcode::kInt32Add:
1907 return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
1908 kArithmeticImm);
1909 case IrOpcode::kInt32Sub:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001910 return VisitWord32Compare(this, value, &cont);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001911 case IrOpcode::kWord32And: {
1912 Int32BinopMatcher m(value);
1913 if (m.right().HasValue() &&
1914 (base::bits::CountPopulation32(m.right().Value()) == 1)) {
1915 // If the mask has only one bit set, we can use tbz/tbnz.
1916 DCHECK((cont.condition() == kEqual) ||
1917 (cont.condition() == kNotEqual));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001918 Emit(cont.Encode(kArm64TestAndBranch32), g.NoOutput(),
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001919 g.UseRegister(m.left().node()),
1920 g.TempImmediate(
1921 base::bits::CountTrailingZeros32(m.right().Value())),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001922 g.Label(cont.true_block()), g.Label(cont.false_block()));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001923 return;
1924 }
1925 return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
1926 kLogical32Imm);
1927 }
1928 case IrOpcode::kWord64And: {
1929 Int64BinopMatcher m(value);
1930 if (m.right().HasValue() &&
1931 (base::bits::CountPopulation64(m.right().Value()) == 1)) {
1932 // If the mask has only one bit set, we can use tbz/tbnz.
1933 DCHECK((cont.condition() == kEqual) ||
1934 (cont.condition() == kNotEqual));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001935 Emit(cont.Encode(kArm64TestAndBranch), g.NoOutput(),
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001936 g.UseRegister(m.left().node()),
1937 g.TempImmediate(
1938 base::bits::CountTrailingZeros64(m.right().Value())),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001939 g.Label(cont.true_block()), g.Label(cont.false_block()));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001940 return;
1941 }
1942 return VisitWordCompare(this, value, kArm64Tst, &cont, true,
1943 kLogical64Imm);
1944 }
1945 default:
1946 break;
1947 }
1948 }
1949
1950 // Branch could not be combined with a compare, compare against 0 and branch.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001951 Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
1952 g.UseRegister(value), g.Label(cont.true_block()),
1953 g.Label(cont.false_block()));
1954}
1955
1956
1957void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1958 Arm64OperandGenerator g(this);
1959 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1960
1961 // Emit either ArchTableSwitch or ArchLookupSwitch.
1962 size_t table_space_cost = 4 + sw.value_range;
1963 size_t table_time_cost = 3;
1964 size_t lookup_space_cost = 3 + 2 * sw.case_count;
1965 size_t lookup_time_cost = sw.case_count;
1966 if (sw.case_count > 0 &&
1967 table_space_cost + 3 * table_time_cost <=
1968 lookup_space_cost + 3 * lookup_time_cost &&
1969 sw.min_value > std::numeric_limits<int32_t>::min()) {
1970 InstructionOperand index_operand = value_operand;
1971 if (sw.min_value) {
1972 index_operand = g.TempRegister();
1973 Emit(kArm64Sub32, index_operand, value_operand,
1974 g.TempImmediate(sw.min_value));
1975 }
1976 // Generate a table lookup.
1977 return EmitTableSwitch(sw, index_operand);
1978 }
1979
1980 // Generate a sequence of conditional jumps.
1981 return EmitLookupSwitch(sw, value_operand);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001982}
1983
1984
// 32-bit equality. Comparisons against zero are fused with the operation
// that produced the value (cmn/cmp/tst) or with an inner Word32Equal, in
// which case the node's inputs are rewritten in place and the continuation
// negated; otherwise a plain 32-bit compare is selected.
void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
          return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
                                  kArithmeticImm);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32And:
          return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                  kLogical32Imm);
        case IrOpcode::kWord32Equal: {
          // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
          Int32BinopMatcher mequal(value);
          node->ReplaceInput(0, mequal.left().node());
          node->ReplaceInput(1, mequal.right().node());
          cont.Negate();
          return VisitWord32Compare(this, node, &cont);
        }
        default:
          break;
      }
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}
2018
2019
// Signed 32-bit less-than, materialized via a 32-bit compare.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2024
2025
// Signed 32-bit less-than-or-equal, materialized via a 32-bit compare.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2030
2031
// Unsigned 32-bit less-than, materialized via a 32-bit compare.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2036
2037
// Unsigned 32-bit less-than-or-equal, materialized via a 32-bit compare.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2042
2043
// 64-bit equality. A comparison against zero fuses with a covered Word64And
// (becoming a tst); any other zero comparison becomes a plain zero test;
// everything else selects a 64-bit compare.
void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2063
2064
// 32-bit add with overflow check. If the overflow projection (index 1) is
// used, the add sets flags and the overflow bit feeds the continuation;
// otherwise a plain flag-less continuation is used.
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
}
2074
2075
// 32-bit subtract with overflow check; see VisitInt32AddWithOverflow for the
// projection handling.
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}
2085
2086
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002087void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2088 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2089 FlagsContinuation cont(kOverflow, ovf);
2090 return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
2091 &cont);
2092 }
2093 FlagsContinuation cont;
2094 VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
2095}
2096
2097
// 64-bit subtract with overflow check; overflow projection feeds the flags
// continuation when present.
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}
2107
2108
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002109void InstructionSelector::VisitInt64LessThan(Node* node) {
2110 FlagsContinuation cont(kSignedLessThan, node);
2111 VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
2112}
2113
2114
// Signed 64-bit less-than-or-equal via a 64-bit compare.
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2119
2120
// Unsigned 64-bit less-than via a 64-bit compare.
void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
2125
2126
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002127void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2128 FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
2129 VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
2130}
2131
2132
// Float32 equality via an fcmp-style compare.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2137
2138
// Float32 less-than using the float-specific (unordered-aware) condition.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
2143
2144
// Float32 less-than-or-equal using the float-specific condition.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2149
2150
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002151void InstructionSelector::VisitFloat64Equal(Node* node) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002152 FlagsContinuation cont(kEqual, node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002153 VisitFloat64Compare(this, node, &cont);
2154}
2155
2156
// Float64 less-than using the float-specific (unordered-aware) condition.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
2161
2162
// Float64 less-than-or-equal using the float-specific condition.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2167
2168
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002169void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
2170 Arm64OperandGenerator g(this);
2171 Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
2172 g.UseRegister(node->InputAt(0)));
2173}
2174
2175
// Extracts the high 32 bits of a float64's bit pattern into a general
// register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
2181
2182
// Replaces the low 32 bits of a float64 with a word. When the input is a
// covered InsertHighWord32, both halves are assembled with a single bfi into
// one general register (note the DefineSameAsFirst pinning the bfi result to
// |right|'s register) followed by a move into an FP register.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}
2199
2200
// Replaces the high 32 bits of a float64 with a word. Mirrors
// VisitFloat64InsertLowWord32: a covered InsertLowWord32 input lets both
// halves be combined with one bfi plus a move into an FP register.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
       g.UseRegister(left), g.UseRegister(right));
}
2216
2217
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002218// static
2219MachineOperatorBuilder::Flags
2220InstructionSelector::SupportedMachineOperatorFlags() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002221 return MachineOperatorBuilder::kFloat32Max |
2222 MachineOperatorBuilder::kFloat32Min |
2223 MachineOperatorBuilder::kFloat32RoundDown |
2224 MachineOperatorBuilder::kFloat64Max |
2225 MachineOperatorBuilder::kFloat64Min |
2226 MachineOperatorBuilder::kFloat64RoundDown |
2227 MachineOperatorBuilder::kFloat32RoundUp |
2228 MachineOperatorBuilder::kFloat64RoundUp |
2229 MachineOperatorBuilder::kFloat32RoundTruncate |
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002230 MachineOperatorBuilder::kFloat64RoundTruncate |
2231 MachineOperatorBuilder::kFloat64RoundTiesAway |
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002232 MachineOperatorBuilder::kFloat32RoundTiesEven |
2233 MachineOperatorBuilder::kFloat64RoundTiesEven |
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002234 MachineOperatorBuilder::kWord32ShiftIsSafe |
2235 MachineOperatorBuilder::kInt32DivIsSafe |
Ben Murdoch097c5b22016-05-18 11:27:45 +01002236 MachineOperatorBuilder::kUint32DivIsSafe |
2237 MachineOperatorBuilder::kWord32ReverseBits |
2238 MachineOperatorBuilder::kWord64ReverseBits;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002239}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002240
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002241} // namespace compiler
2242} // namespace internal
2243} // namespace v8