// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

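// As elsewhere in V8, "__" abbreviates masm()-> so that the emitter cases
// below read like an assembly listing.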
#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
      return Immediate(0);
    }
    return Immediate(constant.ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return Operand(offset.from_stack_pointer() ? rsp : rbp,
                   offset.offset() + extra);
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

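  // Decodes the addressing mode encoded in the instruction's opcode and
  // consumes the instruction's inputs in order, starting at *offset. For
  // example, kMode_MR4I consumes three inputs: a base register, an index
  // register (scaled by 4), and a 32-bit displacement.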
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with more compact encoding
      	return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


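// The OutOfLineCode helpers below emit slow paths outside the main
// instruction stream: the fast path jumps to entry() when needed and
// execution resumes at exit().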
class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ Pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ Movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

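  // Write-barrier slow path: skip the barrier for smis and for values on
  // pages that cannot contain interesting pointers; otherwise record the
  // slot in the remembered set via RecordWriteStub.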
  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (mode_ > RecordWriteMode::kValueIsMap) {
      __ CheckPageFlag(value_, scratch0_,
                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
                       exit());
    }
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         EMIT_REMEMBERED_SET, save_fp_mode);
    __ leap(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};

}  // namespace


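// The ASSEMBLE_* macros below expand one instruction-selector opcode into
// the matching register/memory/immediate forms of an x64 instruction,
// dispatching on where the register allocator placed each operand.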
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


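// Shift counts are either an immediate (masked to 5 bits for 32-bit shifts
// and 6 bits for 64-bit shifts, hence the "width" argument) or implicit in
// CL, which the _cl instruction forms consume.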
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)


#define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
  do {                                                                  \
    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    } else {                                                            \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)


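// The checked load/store macros implement bounds-checked typed-array
// accesses: the index is compared against the length, and out-of-bounds
// loads yield zero (integers) or NaN (floats) while out-of-bounds stores
// are ignored. The ad-hoc OutOfLine classes handle the constant-length
// case, where index1 + index2 must be re-checked against the full length.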
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat final : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ Pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger final : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() final {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat final : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger final : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() final {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)


void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}


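// For tail calls the caller's frame is reused: stack-parameter slots the
// callee does not need are popped above, extra slots are reserved below,
// and rbp is restored to the caller's frame pointer before the jump.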
void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    __ movq(rbp, MemOperand(rbp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchLazyBailout: {
      EnsureSpaceForLazyDeopt();
      RecordCallPosition(instr);
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters);
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ Cvttsd2siq(result, input);
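      // On failure cvttsd2si produces the sentinel 0x8000000000000000
      // (INT64_MIN); comparing the result with 1 sets the overflow flag
      // exactly for that value, so the slow path is taken only then.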
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
      __ movp(operand, value);
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kX64Lzcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Lzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Tzcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Tzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Popcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Popcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Popcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Popcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Popcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Popcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32Cmp:
      ASSEMBLE_SSE_BINOP(Ucomiss);
      break;
    case kSSEFloat32Add:
      ASSEMBLE_SSE_BINOP(addss);
      break;
    case kSSEFloat32Sub:
      ASSEMBLE_SSE_BINOP(subss);
      break;
    case kSSEFloat32Mul:
      ASSEMBLE_SSE_BINOP(mulss);
      break;
    case kSSEFloat32Div:
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      break;
    case kSSEFloat32Max:
      ASSEMBLE_SSE_BINOP(maxss);
      break;
    case kSSEFloat32Min:
      ASSEMBLE_SSE_BINOP(minss);
      break;
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(Cvtss2sd);
      break;
    case kSSEFloat32Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64Cmp:
      ASSEMBLE_SSE_BINOP(Ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_SSE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_SSE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_SSE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Mod: {
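      // SSE has no remainder instruction, so fall back to the x87 fprem
      // loop below, shuffling the operands through memory.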
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Max:
      ASSEMBLE_SSE_BINOP(maxsd);
      break;
    case kSSEFloat64Min:
      ASSEMBLE_SSE_BINOP(minsd);
      break;
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(Cvtsd2ss);
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEFloat32ToInt64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
        Label done;
        Label fail;
        __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
        if (instr->InputAt(0)->IsDoubleRegister()) {
          __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
        } else {
          __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
        }
        // If the input is NaN, then the conversion fails.
        __ j(parity_even, &fail);
        // If the input is INT64_MIN, then the conversion succeeds.
        __ j(equal, &done);
        __ cmpq(i.OutputRegister(0), Immediate(1));
        // If the conversion results in INT64_MIN, but the input was not
        // INT64_MIN, then the conversion fails.
        __ j(no_overflow, &done);
        __ bind(&fail);
        __ Set(i.OutputRegister(1), 0);
        __ bind(&done);
      }
      break;
    case kSSEFloat64ToInt64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
      }
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
        Label done;
        Label fail;
        __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
        if (instr->InputAt(0)->IsDoubleRegister()) {
          __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
        } else {
          __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
        }
        // If the input is NaN, then the conversion fails.
        __ j(parity_even, &fail);
        // If the input is INT64_MIN, then the conversion succeeds.
        __ j(equal, &done);
        __ cmpq(i.OutputRegister(0), Immediate(1));
        // If the conversion results in INT64_MIN, but the input was not
        // INT64_MIN, then the conversion fails.
        __ j(no_overflow, &done);
        __ bind(&fail);
        __ Set(i.OutputRegister(1), 0);
        __ bind(&done);
      }
      break;
    case kSSEFloat32ToUint64: {
      Label done;
      Label success;
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 0);
      }
      // There does not exist a Float32ToUint64 instruction, so we have to use
      // the Float32ToInt64 instruction.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      // If the result of the Float32ToInt64 conversion is positive, we
      // are already done.
      __ testq(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
      // The result of the first conversion was negative, which means that the
      // input value was not within the positive int64 range. We subtract 2^64
      // and convert it again to see if it is within the uint64 range.
      __ Move(kScratchDoubleReg, -9223372036854775808.0f);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
      } else {
        __ addss(kScratchDoubleReg, i.InputOperand(0));
      }
      __ Cvttss2siq(i.OutputRegister(), kScratchDoubleReg);
      __ testq(i.OutputRegister(), i.OutputRegister());
      // The only possible negative value here is 0x8000000000000000, which is
      // used on x64 to indicate an integer overflow.
      __ j(negative, &done);
      // The input value is within uint64 range and the second conversion
      // worked, but we still have to undo the subtraction we did earlier.
      __ Set(kScratchRegister, 0x8000000000000000);
      __ orq(i.OutputRegister(), kScratchRegister);
      __ bind(&success);
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
      }
      __ bind(&done);
      break;
    }
    case kSSEFloat64ToUint64: {
      Label done;
      Label success;
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 0);
      }
      // There does not exist a Float64ToUint64 instruction, so we have to use
      // the Float64ToInt64 instruction.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      // If the result of the Float64ToInt64 conversion is positive, we
      // are already done.
      __ testq(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
      // The result of the first conversion was negative, which means that the
      // input value was not within the positive int64 range. We subtract 2^64
      // and convert it again to see if it is within the uint64 range.
      __ Move(kScratchDoubleReg, -9223372036854775808.0);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
      } else {
        __ addsd(kScratchDoubleReg, i.InputOperand(0));
      }
      __ Cvttsd2siq(i.OutputRegister(), kScratchDoubleReg);
      __ testq(i.OutputRegister(), i.OutputRegister());
      // The only possible negative value here is 0x8000000000000000, which is
      // used on x64 to indicate an integer overflow.
      __ j(negative, &done);
      // The input value is within uint64 range and the second conversion
      // worked, but we still have to undo the subtraction we did earlier.
      __ Set(kScratchRegister, 0x8000000000000000);
      __ orq(i.OutputRegister(), kScratchRegister);
      __ bind(&success);
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
      }
      __ bind(&done);
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEInt64ToFloat32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEInt64ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint64ToFloat32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movq(kScratchRegister, i.InputRegister(0));
      } else {
        __ movq(kScratchRegister, i.InputOperand(0));
      }
      __ Cvtqui2ss(i.OutputDoubleRegister(), kScratchRegister,
                   i.TempRegister(0));
      break;
    case kSSEUint64ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movq(kScratchRegister, i.InputRegister(0));
      } else {
        __ movq(kScratchRegister, i.InputOperand(0));
      }
      __ Cvtqui2sd(i.OutputDoubleRegister(), kScratchRegister,
                   i.TempRegister(0));
      break;
    case kSSEUint32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      } else {
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
      }
      break;
    case kSSEFloat64InsertLowWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
      }
      break;
    case kSSEFloat64InsertHighWord32:
      if (instr->InputAt(1)->IsRegister()) {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
      } else {
        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
      }
      break;
    case kSSEFloat64LoadLowWord32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kAVXFloat32Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat32Add:
      ASSEMBLE_AVX_BINOP(vaddss);
      break;
    case kAVXFloat32Sub:
      ASSEMBLE_AVX_BINOP(vsubss);
      break;
    case kAVXFloat32Mul:
      ASSEMBLE_AVX_BINOP(vmulss);
      break;
    case kAVXFloat32Div:
      ASSEMBLE_AVX_BINOP(vdivss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat32Max:
      ASSEMBLE_AVX_BINOP(vmaxss);
      break;
    case kAVXFloat32Min:
      ASSEMBLE_AVX_BINOP(vminss);
      break;
    case kAVXFloat64Cmp: {
      CpuFeatureScope avx_scope(masm(), AVX);
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;
    }
    case kAVXFloat64Add:
      ASSEMBLE_AVX_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_BINOP(vdivsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kAVXFloat64Max:
      ASSEMBLE_AVX_BINOP(vmaxsd);
      break;
    case kAVXFloat64Min:
      ASSEMBLE_AVX_BINOP(vminsd);
      break;
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
      __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputDoubleRegister(0));
      } else {
        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
                  i.InputOperand(0));
      }
      break;
    }
    case kX64Movsxbl:
      ASSEMBLE_MOVX(movsxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      ASSEMBLE_MOVX(movzxbl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      ASSEMBLE_MOVX(movsxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      ASSEMBLE_MOVX(movzxwl);
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
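    // Note: plain 32-bit moves implicitly zero the upper 32 bits of the
    // destination register on x64, which the AssertZeroExtended checks
    // above and below rely on.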
1427 case kX64Movl:
1428 if (instr->HasOutput()) {
1429 if (instr->addressing_mode() == kMode_None) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001430 if (instr->InputAt(0)->IsRegister()) {
1431 __ movl(i.OutputRegister(), i.InputRegister(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001432 } else {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001433 __ movl(i.OutputRegister(), i.InputOperand(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001434 }
1435 } else {
1436 __ movl(i.OutputRegister(), i.MemoryOperand());
1437 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001438 __ AssertZeroExtended(i.OutputRegister());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001439 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001440 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001441 Operand operand = i.MemoryOperand(&index);
1442 if (HasImmediateInput(instr, index)) {
1443 __ movl(operand, i.InputImmediate(index));
1444 } else {
1445 __ movl(operand, i.InputRegister(index));
1446 }
1447 }
1448 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001449 case kX64Movsxlq:
1450 ASSEMBLE_MOVX(movsxlq);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001451 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001452 case kX64Movq:
1453 if (instr->HasOutput()) {
1454 __ movq(i.OutputRegister(), i.MemoryOperand());
1455 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001456 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001457 Operand operand = i.MemoryOperand(&index);
1458 if (HasImmediateInput(instr, index)) {
1459 __ movq(operand, i.InputImmediate(index));
1460 } else {
1461 __ movq(operand, i.InputRegister(index));
1462 }
1463 }
1464 break;
1465 case kX64Movss:
1466 if (instr->HasOutput()) {
1467 __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001468 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001469 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001470 Operand operand = i.MemoryOperand(&index);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001471 __ movss(operand, i.InputDoubleRegister(index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001472 }
1473 break;
1474 case kX64Movsd:
1475 if (instr->HasOutput()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001476 __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001477 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001478 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001479 Operand operand = i.MemoryOperand(&index);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001480 __ Movsd(operand, i.InputDoubleRegister(index));
1481 }
1482 break;
    case kX64BitcastFI:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kX64BitcastDL:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movq(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kX64BitcastIF:
      if (instr->InputAt(0)->IsRegister()) {
        __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kX64BitcastLD:
      if (instr->InputAt(0)->IsRegister()) {
        __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode happen to work out. The "addl"/"subl" forms in
      // these cases are faster based on measurements.
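      // For example, when the output aliases input 0, "leal r, [r+0x10]"
      // becomes "addl r, 0x10", and "leal r, [r+r]" becomes "shll r, 1".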
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
        frame_access_state()->IncreaseSPDelta(1);
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
          frame_access_state()->IncreaseSPDelta(1);
        } else if (instr->InputAt(0)->IsDoubleRegister()) {
          // TODO(titzer): use another machine instruction?
          __ subq(rsp, Immediate(kDoubleSize));
          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
          __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
          frame_access_state()->IncreaseSPDelta(1);
        }
      }
      break;
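    // Unlike kX64Push, kX64Poke below writes into an argument slot that has
    // already been reserved below rsp, so it does not move the stack pointer
    // and needs no SP-delta bookkeeping.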
    case kX64Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
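    // The checked memory-access cases below bounds-check an index against a
    // length operand before performing the access; out-of-range loads
    // produce zero (NaN for the float variants) and out-of-range stores are
    // skipped (see the ASSEMBLE_CHECKED_* macros earlier in this file).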
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER(movq);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
      break;
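    // kX64StackCheck only sets the condition flags; the branch that consumes
    // the flags is assembled separately (see AssembleArchBranch below).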
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
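  // For floating-point comparisons the parity flag (PF) is set exactly when
  // the operands were unordered (at least one NaN), so the kUnordered*
  // conditions dispatch on parity_even before testing the ordered condition.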
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
    default:
      UNREACHABLE();
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
    default:
      UNREACHABLE();
      break;
  }
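  // Ordered conditions reach this point with cc set: setcc writes only the
  // low byte of the result register, so movzxbl is needed to clear the upper
  // bits and complete the 0/1 materialization.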
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
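  // Lowered as a linear compare-and-branch chain: input 0 is the switch
  // value, input 1 the default target, followed by (case value, target)
  // pairs.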
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
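  // Bounds-check the input against the case count, jumping to the default
  // target (input 1) when it is out of range, then jump indirectly through
  // the table of 8-byte label addresses emitted by AssembleJumpTable.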
  Label* const table = AddJumpTable(cases, case_count);
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
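  // Calls into the pre-generated deoptimizer entry table;
  // RelocInfo::RUNTIME_ENTRY marks the call target as a runtime address
  // rather than a movable code object.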
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


namespace {

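// Byte width of one XMM register; used below when saving and restoring
// callee-saved FP registers 16 bytes at a time.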
static const int kQuadWordSize = 16;

}  // namespace


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
  } else if (descriptor->IsJSFunctionCall()) {
    __ Prologue(this->info()->GeneratePreagedPrologue());
  } else if (frame()->needs_frame()) {
    __ StubPrologue();
  } else {
    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
  }
  frame_access_state()->SetFrameAccessToDefault();

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -=
        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
  }

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  if (stack_shrink_slots > 0) {
    __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
  }

  if (saves_fp != 0) {  // Save callee-saved XMM registers.
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Adjust the stack pointer.
    __ subp(rsp, Immediate(stack_size));
    // Store the registers on the stack.
    int slot_idx = 0;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
                XMMRegister::from_code(i));
      slot_idx++;
    }
    frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
                                              (kQuadWordSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {  // Save callee-saved registers.
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ pushq(Register::from_code(i));
      frame()->AllocateSavedCalleeRegisterSlots(1);
    }
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ popq(Register::from_code(i));
    }
  }
  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Load the registers from the stack.
    int slot_idx = 0;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(XMMRegister::from_code(i),
                Operand(rsp, kQuadWordSize * slot_idx));
      slot_idx++;
    }
    // Adjust the stack pointer.
    __ addp(rsp, Immediate(stack_size));
  }

  if (descriptor->IsCFunctionCall()) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
    }
  }
  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
  // Might need rcx for scratch if pop_size is too big.
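  // (The "ret imm16" form only encodes a 16-bit immediate, so a larger pop
  // size is handled by popping the return address into the scratch register
  // before adjusting rsp.)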
  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
  __ Ret(static_cast<int>(pop_size), rcx);
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ movp(dst, Operand(rbp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ Movapd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ Movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ Movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ Movsd(xmm0, src);
      __ Movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ movq(kScratchRegister, src);
    __ movq(src, dst);
    __ movq(dst, kScratchRegister);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
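    // Swap a register with a stack slot without a second scratch register:
    // push the register, load the slot into the register, then pop the old
    // register value into the slot. The SP-delta updates keep ToOperand
    // computing correct frame offsets while rsp is temporarily moved.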
    Register src = g.ToRegister(source);
    __ pushq(src);
    frame_access_state()->IncreaseSPDelta(1);
    Operand dst = g.ToOperand(destination);
    __ movq(src, dst);
    frame_access_state()->IncreaseSPDelta(-1);
    dst = g.ToOperand(destination);
    __ popq(dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ pushq(src);
    frame_access_state()->IncreaseSPDelta(1);
    src = g.ToOperand(source);
    __ movq(src, tmp);
    frame_access_state()->IncreaseSPDelta(-1);
    dst = g.ToOperand(destination);
    __ popq(dst);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ Movapd(xmm0, src);
    __ Movapd(src, dst);
    __ Movapd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ Movsd(xmm0, src);
    __ Movsd(src, dst);
    __ Movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
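  // Emit one 64-bit absolute label address per jump-table entry;
  // AssembleArchTableSwitch above indexes into this table with times_8
  // scaling.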
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8