// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
      return Immediate(0);
    }
    return Immediate(constant.ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return Operand(offset.from_stack_pointer() ? rsp : rbp,
                   offset.offset() + extra);
  }

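  // Consumes instruction inputs in order: returns the current input index
  // and advances *offset by one.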
  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

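  // Derives the SIB scale factor from the distance between the actual
  // addressing mode and its times_1 variant; the static asserts below pin
  // down the enum layout this relies on.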
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

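  // Decodes the addressing mode from the opcode and materializes a memory
  // operand, consuming base/index/displacement instruction inputs via
  // NextOffset as needed.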
  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with a more compact encoding instead.
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


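// Out-of-line paths for checked loads whose bounds check fails: the load is
// skipped and the result is materialized as zero (integers) or NaN (floats).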
class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

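  // pcmpeqd of a register with itself yields all ones, which is a quiet NaN
  // bit pattern for a double.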
  void Generate() final { __ Pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


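// Slow path for double-to-int32 truncation: spills the input to the stack
// and calls the generic SlowTruncateToI helper on it.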
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ Movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};


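// Deferred write-barrier code for kArchStoreWithWriteBarrier: filters out
// smis and values on uninteresting pages before calling the RecordWriteStub.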
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ leap(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};

}  // namespace


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)

#define ASSEMBLE_COMPARE(asm_instr)                                   \
  do {                                                                \
    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
      size_t index = 0;                                               \
      Operand left = i.MemoryOperand(&index);                         \
      if (HasImmediateInput(instr, index)) {                          \
        __ asm_instr(left, i.InputImmediate(index));                  \
      } else {                                                        \
        __ asm_instr(left, i.InputRegister(index));                   \
      }                                                               \
    } else {                                                          \
      if (HasImmediateInput(instr, 1)) {                              \
        if (instr->InputAt(0)->IsRegister()) {                        \
          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
        } else {                                                      \
          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
        }                                                             \
      } else {                                                        \
        if (instr->InputAt(1)->IsRegister()) {                        \
          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
        } else {                                                      \
          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
        }                                                             \
      }                                                               \
    }                                                                 \
  } while (0)

#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


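// The width argument selects how many bits of an immediate shift count are
// meaningful: 5 for 32-bit shifts, 6 for 64-bit shifts. Variable shift
// counts implicitly come from cl.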
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


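// Emits a sign- or zero-extending move, picking the memory, register or
// stack-slot form to match the instruction's inputs.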
#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)


#define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
  do {                                                                  \
    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    } else {                                                            \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)


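// The checked load/store macros below emit an unsigned bounds check against
// either a register length or a constant length. Accesses that may be out of
// bounds branch to out-of-line code that re-checks the exact 32-bit index
// and then yields zero (integers) or NaN (floats) for loads, or skips the
// store entirely.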
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat final : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ Pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger final : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() final {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat final : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger final : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() final {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)


void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
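    // Unwind the current frame: restore the caller's rbp, which is saved at
    // [rbp], before the tail call.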
    __ movq(rbp, MemOperand(rbp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
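        // Compute the code entry address: skip the Code object header and
        // clear the heap-object tag.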
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters);
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->frame()->needs_frame()) {
        __ movq(i.OutputRegister(), Operand(rbp, 0));
      } else {
        __ movq(i.OutputRegister(), rbp);
      }
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ Cvttsd2siq(result, input);
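      // Cvttsd2siq produces INT64_MIN (0x8000000000000000) when the input is
      // out of range or NaN; comparing that against 1 is the only case that
      // sets the overflow flag, which routes exactly the failed conversions
      // to the slow path.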
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
      __ movp(operand, value);
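      // Fast path: the out-of-line write barrier is only taken when the host
      // object's page is flagged as having interesting outgoing pointers.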
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = rsp;
      } else {
        base = rbp;
      }
      __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_COMPARE(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_COMPARE(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_COMPARE(testl);
      break;
    case kX64Test:
      ASSEMBLE_COMPARE(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kX64Lzcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Lzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Tzcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Tzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Popcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Popcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Popcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Popcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Popcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Popcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32Cmp:
      ASSEMBLE_SSE_BINOP(Ucomiss);
      break;
    case kSSEFloat32Add:
      ASSEMBLE_SSE_BINOP(addss);
      break;
    case kSSEFloat32Sub:
      ASSEMBLE_SSE_BINOP(subss);
      break;
    case kSSEFloat32Mul:
      ASSEMBLE_SSE_BINOP(mulss);
      break;
    case kSSEFloat32Div:
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
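      // Build the sign-bit-clearing mask in place: all ones shifted right by
      // 33 leaves 0x7fffffff in the low word of each 64-bit lane.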
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      break;
    case kSSEFloat32Max:
      ASSEMBLE_SSE_BINOP(maxss);
      break;
    case kSSEFloat32Min:
      ASSEMBLE_SSE_BINOP(minss);
      break;
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(Cvtss2sd);
      break;
    case kSSEFloat32Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat32ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32ToUint32: {
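      // SSE has no unsigned 32-bit truncation instruction, so convert via
      // the signed 64-bit form: every uint32 value is representable as int64.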
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEFloat64Cmp:
      ASSEMBLE_SSE_BINOP(Ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_SSE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_SSE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_SSE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following two instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
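        // SAHF is unavailable on this CPU: transfer the FPU condition bits
        // into EFLAGS by hand via a push/popfq round trip instead.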
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Max:
      ASSEMBLE_SSE_BINOP(maxsd);
      break;
    case kSSEFloat64Min:
      ASSEMBLE_SSE_BINOP(minsd);
      break;
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
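      // All ones shifted right by 1 gives a mask that clears only the
      // double's sign bit.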
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(Cvtsd2ss);
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEFloat32ToInt64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
        Label done;
        Label fail;
        __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
        if (instr->InputAt(0)->IsDoubleRegister()) {
          __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
        } else {
          __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
        }
        // If the input is NaN, then the conversion fails.
        __ j(parity_even, &fail);
        // If the input is INT64_MIN, then the conversion succeeds.
        __ j(equal, &done);
        __ cmpq(i.OutputRegister(0), Immediate(1));
        // If the conversion results in INT64_MIN, but the input was not
        // INT64_MIN, then the conversion fails.
        __ j(no_overflow, &done);
        __ bind(&fail);
        __ Set(i.OutputRegister(1), 0);
        __ bind(&done);
      }
      break;
    case kSSEFloat64ToInt64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
      }
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
        Label done;
        Label fail;
        __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
        if (instr->InputAt(0)->IsDoubleRegister()) {
          __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
        } else {
          __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
        }
        // If the input is NaN, then the conversion fails.
        __ j(parity_even, &fail);
        // If the input is INT64_MIN, then the conversion succeeds.
        __ j(equal, &done);
        __ cmpq(i.OutputRegister(0), Immediate(1));
        // If the conversion results in INT64_MIN, but the input was not
        // INT64_MIN, then the conversion fails.
        __ j(no_overflow, &done);
        __ bind(&fail);
        __ Set(i.OutputRegister(1), 0);
        __ bind(&done);
      }
      break;
    case kSSEFloat32ToUint64: {
      Label done;
      Label success;
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 0);
      }
      // There does not exist a Float32ToUint64 instruction, so we have to use
      // the Float32ToInt64 instruction.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      // If the result of the Float32ToInt64 conversion is positive, we are
      // already done.
      __ testq(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
      // The result of the first conversion was negative, which means that the
      // input value was not within the positive int64 range. We subtract 2^63
      // and convert it again to see if it is within the uint64 range.
      __ Move(kScratchDoubleReg, -9223372036854775808.0f);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
      } else {
        __ addss(kScratchDoubleReg, i.InputOperand(0));
      }
      __ Cvttss2siq(i.OutputRegister(), kScratchDoubleReg);
      __ testq(i.OutputRegister(), i.OutputRegister());
      // The only possible negative value here is 0x8000000000000000, which is
      // used on x64 to indicate an integer overflow.
      __ j(negative, &done);
      // The input value is within uint64 range and the second conversion
      // worked successfully, but we still have to undo the subtraction we did
      // earlier.
      __ Set(kScratchRegister, 0x8000000000000000);
      __ orq(i.OutputRegister(), kScratchRegister);
      __ bind(&success);
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
      }
      __ bind(&done);
      break;
    }
    case kSSEFloat64ToUint64: {
      Label done;
      Label success;
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 0);
      }
      // There does not exist a Float64ToUint64 instruction, so we have to use
      // the Float64ToInt64 instruction.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      // If the result of the Float64ToInt64 conversion is positive, we are
      // already done.
      __ testq(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
      // The result of the first conversion was negative, which means that the
      // input value was not within the positive int64 range. We subtract 2^63
      // and convert it again to see if it is within the uint64 range.
      __ Move(kScratchDoubleReg, -9223372036854775808.0);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
      } else {
        __ addsd(kScratchDoubleReg, i.InputOperand(0));
      }
      __ Cvttsd2siq(i.OutputRegister(), kScratchDoubleReg);
      __ testq(i.OutputRegister(), i.OutputRegister());
      // The only possible negative value here is 0x8000000000000000, which is
      // used on x64 to indicate an integer overflow.
      __ j(negative, &done);
      // The input value is within uint64 range and the second conversion
      // worked successfully, but we still have to undo the subtraction we did
      // earlier.
      __ Set(kScratchRegister, 0x8000000000000000);
      __ orq(i.OutputRegister(), kScratchRegister);
      __ bind(&success);
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
      }
      __ bind(&done);
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEInt32ToFloat32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEInt64ToFloat32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEInt64ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint64ToFloat32:
      if (instr->InputAt(0)->IsRegister()) {
        __ movq(kScratchRegister, i.InputRegister(0));
      } else {
        __ movq(kScratchRegister, i.InputOperand(0));
      }
      __ Cvtqui2ss(i.OutputDoubleRegister(), kScratchRegister,
                   i.TempRegister(0));
      break;
    case kSSEUint64ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ movq(kScratchRegister, i.InputRegister(0));
      } else {
        __ movq(kScratchRegister, i.InputOperand(0));
      }
      __ Cvtqui2sd(i.OutputDoubleRegister(), kScratchRegister,
                   i.TempRegister(0));
      break;
    case kSSEUint32ToFloat64:
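      // movl implicitly zero-extends to 64 bits, so the signed 64-bit
      // conversion below sees the full unsigned 32-bit value.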
1297 if (instr->InputAt(0)->IsRegister()) {
1298 __ movl(kScratchRegister, i.InputRegister(0));
1299 } else {
1300 __ movl(kScratchRegister, i.InputOperand(0));
1301 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001302 __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001303 break;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001304 case kSSEUint32ToFloat32:
1305 if (instr->InputAt(0)->IsRegister()) {
1306 __ movl(kScratchRegister, i.InputRegister(0));
1307 } else {
1308 __ movl(kScratchRegister, i.InputOperand(0));
1309 }
1310 __ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
1311 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001312 case kSSEFloat64ExtractLowWord32:
1313 if (instr->InputAt(0)->IsDoubleStackSlot()) {
1314 __ movl(i.OutputRegister(), i.InputOperand(0));
1315 } else {
1316 __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
1317 }
1318 break;
1319 case kSSEFloat64ExtractHighWord32:
1320 if (instr->InputAt(0)->IsDoubleStackSlot()) {
1321 __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
1322 } else {
1323 __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
1324 }
1325 break;
1326 case kSSEFloat64InsertLowWord32:
1327 if (instr->InputAt(1)->IsRegister()) {
1328 __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
1329 } else {
1330 __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
1331 }
1332 break;
1333 case kSSEFloat64InsertHighWord32:
1334 if (instr->InputAt(1)->IsRegister()) {
1335 __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
1336 } else {
1337 __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
1338 }
1339 break;
1340 case kSSEFloat64LoadLowWord32:
1341 if (instr->InputAt(0)->IsRegister()) {
1342 __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
1343 } else {
1344 __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
1345 }
1346 break;
1347 case kAVXFloat32Cmp: {
1348 CpuFeatureScope avx_scope(masm(), AVX);
1349 if (instr->InputAt(1)->IsDoubleRegister()) {
1350 __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1351 } else {
1352 __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
1353 }
1354 break;
1355 }
1356 case kAVXFloat32Add:
1357 ASSEMBLE_AVX_BINOP(vaddss);
1358 break;
1359 case kAVXFloat32Sub:
1360 ASSEMBLE_AVX_BINOP(vsubss);
1361 break;
1362 case kAVXFloat32Mul:
1363 ASSEMBLE_AVX_BINOP(vmulss);
1364 break;
1365 case kAVXFloat32Div:
1366 ASSEMBLE_AVX_BINOP(vdivss);
1367 // Don't delete this mov. It may improve performance on some CPUs,
1368 // when there is a (v)mulss depending on the result.
1369 __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
1370 break;
1371 case kAVXFloat32Max:
1372 ASSEMBLE_AVX_BINOP(vmaxss);
1373 break;
1374 case kAVXFloat32Min:
1375 ASSEMBLE_AVX_BINOP(vminss);
1376 break;
1377 case kAVXFloat64Cmp: {
1378 CpuFeatureScope avx_scope(masm(), AVX);
1379 if (instr->InputAt(1)->IsDoubleRegister()) {
1380 __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1381 } else {
1382 __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
1383 }
1384 break;
1385 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001386 case kAVXFloat64Add:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001387 ASSEMBLE_AVX_BINOP(vaddsd);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001388 break;
1389 case kAVXFloat64Sub:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001390 ASSEMBLE_AVX_BINOP(vsubsd);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001391 break;
1392 case kAVXFloat64Mul:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001393 ASSEMBLE_AVX_BINOP(vmulsd);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001394 break;
1395 case kAVXFloat64Div:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001396 ASSEMBLE_AVX_BINOP(vdivsd);
1397 // Don't delete this mov. It may improve performance on some CPUs,
1398 // when there is a (v)mulsd depending on the result.
1399 __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001400 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001401 case kAVXFloat64Max:
1402 ASSEMBLE_AVX_BINOP(vmaxsd);
1403 break;
1404 case kAVXFloat64Min:
1405 ASSEMBLE_AVX_BINOP(vminsd);
1406 break;
1407 case kAVXFloat32Abs: {
1408 // TODO(bmeurer): Use RIP relative 128-bit constants.
1409 CpuFeatureScope avx_scope(masm(), AVX);
1410 __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1411 __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
1412 if (instr->InputAt(0)->IsDoubleRegister()) {
1413 __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
1414 i.InputDoubleRegister(0));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001415 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001416 __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
1417 i.InputOperand(0));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001418 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001419 break;
1420 }
1421 case kAVXFloat32Neg: {
1422 // TODO(bmeurer): Use RIP relative 128-bit constants.
1423 CpuFeatureScope avx_scope(masm(), AVX);
1424 __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1425 __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
1426 if (instr->InputAt(0)->IsDoubleRegister()) {
1427 __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
1428 i.InputDoubleRegister(0));
1429 } else {
1430 __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
1431 i.InputOperand(0));
1432 }
1433 break;
1434 }
1435 case kAVXFloat64Abs: {
1436 // TODO(bmeurer): Use RIP relative 128-bit constants.
1437 CpuFeatureScope avx_scope(masm(), AVX);
1438 __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1439 __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
1440 if (instr->InputAt(0)->IsDoubleRegister()) {
1441 __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1442 i.InputDoubleRegister(0));
1443 } else {
1444 __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1445 i.InputOperand(0));
1446 }
1447 break;
1448 }
1449 case kAVXFloat64Neg: {
1450 // TODO(bmeurer): Use RIP relative 128-bit constants.
1451 CpuFeatureScope avx_scope(masm(), AVX);
1452 __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1453 __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
1454 if (instr->InputAt(0)->IsDoubleRegister()) {
1455 __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1456 i.InputDoubleRegister(0));
1457 } else {
1458 __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1459 i.InputOperand(0));
1460 }
1461 break;
1462 }
1463 case kX64Movsxbl:
1464 ASSEMBLE_MOVX(movsxbl);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001465 __ AssertZeroExtended(i.OutputRegister());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001466 break;
1467 case kX64Movzxbl:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001468 ASSEMBLE_MOVX(movzxbl);
1469 __ AssertZeroExtended(i.OutputRegister());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001470 break;
1471 case kX64Movb: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001472 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001473 Operand operand = i.MemoryOperand(&index);
1474 if (HasImmediateInput(instr, index)) {
1475 __ movb(operand, Immediate(i.InputInt8(index)));
1476 } else {
1477 __ movb(operand, i.InputRegister(index));
1478 }
1479 break;
1480 }
1481 case kX64Movsxwl:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001482 ASSEMBLE_MOVX(movsxwl);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001483 __ AssertZeroExtended(i.OutputRegister());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001484 break;
1485 case kX64Movzxwl:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001486 ASSEMBLE_MOVX(movzxwl);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001487 __ AssertZeroExtended(i.OutputRegister());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001488 break;
1489 case kX64Movw: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001490 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001491 Operand operand = i.MemoryOperand(&index);
1492 if (HasImmediateInput(instr, index)) {
1493 __ movw(operand, Immediate(i.InputInt16(index)));
1494 } else {
1495 __ movw(operand, i.InputRegister(index));
1496 }
1497 break;
1498 }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq:
      ASSEMBLE_MOVX(movsxlq);
      break;
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ Movsd(operand, i.InputDoubleRegister(index));
      }
      break;
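    // The bitcast cases reinterpret raw bits between general-purpose and XMM
    // registers without any value conversion; Movd/Movq transfer 32/64-bit
    // patterns unchanged.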
    case kX64BitcastFI:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movl(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kX64BitcastDL:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ movq(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kX64BitcastIF:
      if (instr->InputAt(0)->IsRegister()) {
        __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kX64BitcastLD:
      if (instr->InputAt(0)->IsRegister()) {
        __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
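    // Example of the strength reduction below: "leal rax, [rax + rax*1]"
    // (kMode_MR1 with both inputs aliasing the output) is emitted as
    // "shll rax, 1".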
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
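    // kX64Push must keep frame_access_state() in sync with the actual stack
    // pointer, since later rsp-relative operands depend on the tracked SP
    // delta.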
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
        frame_access_state()->IncreaseSPDelta(1);
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
          frame_access_state()->IncreaseSPDelta(1);
        } else if (instr->InputAt(0)->IsDoubleRegister()) {
          // TODO(titzer): use another machine instruction?
          __ subq(rsp, Immediate(kDoubleSize));
          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
          __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
          frame_access_state()->IncreaseSPDelta(1);
        }
      }
      break;
    case kX64Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
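    // The checked load/store cases below wrap the memory access in a bounds
    // check (see the ASSEMBLE_CHECKED_* macros); out-of-bounds loads yield a
    // default value (zero or NaN) and out-of-bounds stores are skipped rather
    // than faulting.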
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER(movq);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
      break;
    case kX64StackCheck:
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
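    // Floating-point comparisons set the parity flag when the operands are
    // unordered (i.e. at least one of them is NaN), so the kUnordered* cases
    // dispatch on parity first.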
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
    default:
      UNREACHABLE();
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
    default:
      UNREACHABLE();
      break;
  }
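  // setcc only writes the low byte of |reg|, so zero-extend it to get a
  // clean 0/1 value in the full register.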
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


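// A table switch compares the input against the case count and falls back to
// the default target (InputRpo(1)) when out of range; otherwise it jumps
// indirectly through a table of 8-byte label addresses emitted by
// AssembleJumpTable below. Roughly:
//   cmpl input, case_count
//   jae default
//   leaq kScratchRegister, [table]
//   jmp [kScratchRegister + input * 8]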
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X64OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ cmpl(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ leaq(kScratchRegister, Operand(table));
  __ jmp(Operand(kScratchRegister, input, times_8, 0));
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


namespace {

static const int kQuadWordSize = 16;

}  // namespace


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
  } else if (descriptor->IsJSFunctionCall()) {
    __ Prologue(this->info()->GeneratePreagedPrologue());
  } else if (frame()->needs_frame()) {
    __ StubPrologue();
  } else {
    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
  }
  frame_access_state()->SetFrameAccessToDefault();

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly
    // from the unoptimized frame. Thus, all that needs to be done is to
    // allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    stack_shrink_slots -=
        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
  }

  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  if (stack_shrink_slots > 0) {
    __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
  }

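  // Callee-saved XMM registers are spilled to 16-byte (kQuadWordSize) slots;
  // movdqu is used since the slots are not guaranteed to be 16-byte aligned.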
  if (saves_fp != 0) {  // Save callee-saved XMM registers.
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Adjust the stack pointer.
    __ subp(rsp, Immediate(stack_size));
    // Store the registers on the stack.
    int slot_idx = 0;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
                XMMRegister::from_code(i));
      slot_idx++;
    }
    frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
                                              (kQuadWordSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {  // Save callee-saved registers.
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ pushq(Register::from_code(i));
      frame()->AllocateSavedCalleeRegisterSlots(1);
    }
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ popq(Register::from_code(i));
    }
  }
  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
  if (saves_fp != 0) {
    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
    const int stack_size = saves_fp_count * kQuadWordSize;
    // Load the registers from the stack.
    int slot_idx = 0;
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
      if (!((1 << i) & saves_fp)) continue;
      __ movdqu(XMMRegister::from_code(i),
                Operand(rsp, kQuadWordSize * slot_idx));
      slot_idx++;
    }
    // Adjust the stack pointer.
    __ addp(rsp, Immediate(stack_size));
  }

  if (descriptor->IsCFunctionCall()) {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
    }
  }
  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
  // Might need rcx for scratch if pop_size is too big.
  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
  __ Ret(static_cast<int>(pop_size), rcx);
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
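        // Heap constants can sometimes be rematerialized from the current
        // frame or loaded from the root list instead of being embedded in
        // the generated code.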
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ movp(dst, Operand(rbp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ Movapd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ Movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ Movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ Movsd(xmm0, src);
      __ Movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ movq(kScratchRegister, src);
    __ movq(src, dst);
    __ movq(dst, kScratchRegister);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
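    // Swap via push/pop: once rsp has moved, any rsp-relative destination
    // operand must be recomputed, hence the IncreaseSPDelta calls and the
    // second g.ToOperand() below.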
    Register src = g.ToRegister(source);
    __ pushq(src);
    frame_access_state()->IncreaseSPDelta(1);
    Operand dst = g.ToOperand(destination);
    __ movq(src, dst);
    frame_access_state()->IncreaseSPDelta(-1);
    dst = g.ToOperand(destination);
    __ popq(dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ pushq(src);
    frame_access_state()->IncreaseSPDelta(1);
    src = g.ToOperand(source);
    __ movq(src, tmp);
    frame_access_state()->IncreaseSPDelta(-1);
    dst = g.ToOperand(destination);
    __ popq(dst);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ Movapd(xmm0, src);
    __ Movapd(src, dst);
    __ Movapd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ Movsd(xmm0, src);
    __ Movsd(src, dst);
    __ Movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


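// The jump table is emitted inline as a sequence of 8-byte label addresses;
// AssembleArchTableSwitch above indexes into it with a times_8 scale.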
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ dq(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8