// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    return Immediate(ToConstant(operand).ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
  }

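  // Returns the current input offset and post-increments it, so successive
  // calls hand out an instruction's inputs in order while a memory operand
  // is being decoded.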
  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

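  // Decodes the addressing mode from the instruction's opcode and assembles
  // the corresponding x64 Operand from the inputs starting at *offset.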
  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        // Should use kMode_MR with the more compact encoding instead.
        UNREACHABLE();
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


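// Out-of-line path that zeroes the result register of a checked load whose
// index was out of bounds.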
class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ xorl(result_, result_); }

 private:
  Register const result_;
};


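// Out-of-line path that fills the result register with a NaN bit pattern
// (pcmpeqd against itself sets all bits) for an out-of-bounds float load.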
class OutOfLineLoadNaN FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


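// Slow path for double-to-integer truncation: spills the input to the stack
// and runs the macro-assembler's SlowTruncateToI helper on it.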
class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    __ subp(rsp, Immediate(kDoubleSize));
    __ movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};

}  // namespace


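// Emits a unary op on either the register or the stack-slot output.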
#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


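// Emits a binary op, picking the register, memory-operand or immediate form
// that matches the instruction's inputs.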
#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)


#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


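// Emits a shift or rotate. An immediate shift amount is truncated to 5 bits
// for 32-bit operands and 6 bits for 64-bit operands; otherwise the variable
// count is taken implicitly from cl.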
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_DOUBLE_BINOP(asm_instr)                                \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)


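// Emits a bounds-checked float load. A register length is compared against
// index1 directly; an immediate length is checked against index1 plus the
// constant index2, with an out-of-line path that re-checks the full index
// before loading. Out-of-bounds loads produce NaN.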
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                                \
  do {                                                                        \
    auto result = i.OutputDoubleRegister();                                   \
    auto buffer = i.InputRegister(0);                                         \
    auto index1 = i.InputRegister(1);                                         \
    auto index2 = i.InputInt32(2);                                            \
    OutOfLineCode* ool;                                                       \
    if (instr->InputAt(3)->IsRegister()) {                                    \
      auto length = i.InputRegister(3);                                       \
      DCHECK_EQ(0, index2);                                                   \
      __ cmpl(index1, length);                                                \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                      \
    } else {                                                                  \
      auto length = i.InputInt32(3);                                          \
      DCHECK_LE(index2, length);                                              \
      __ cmpq(index1, Immediate(length - index2));                            \
      class OutOfLineLoadFloat FINAL : public OutOfLineCode {                 \
       public:                                                                \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,            \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                    \
            : OutOfLineCode(gen),                                             \
              result_(result),                                                \
              buffer_(buffer),                                                \
              index1_(index1),                                                \
              index2_(index2),                                                \
              length_(length) {}                                              \
                                                                              \
        void Generate() FINAL {                                               \
          __ leal(kScratchRegister, Operand(index1_, index2_));               \
          __ pcmpeqd(result_, result_);                                       \
          __ cmpl(kScratchRegister, Immediate(length_));                      \
          __ j(above_equal, exit());                                          \
          __ asm_instr(result_,                                               \
                       Operand(buffer_, kScratchRegister, times_1, 0));       \
        }                                                                     \
                                                                              \
       private:                                                               \
        XMMRegister const result_;                                            \
        Register const buffer_;                                               \
        Register const index1_;                                               \
        int32_t const index2_;                                                \
        int32_t const length_;                                                \
      };                                                                      \
      ool = new (zone())                                                      \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);   \
    }                                                                         \
    __ j(above_equal, ool->entry());                                          \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));           \
    __ bind(ool->exit());                                                     \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto buffer = i.InputRegister(0);                                         \
    auto index1 = i.InputRegister(1);                                         \
    auto index2 = i.InputInt32(2);                                            \
    OutOfLineCode* ool;                                                       \
    if (instr->InputAt(3)->IsRegister()) {                                    \
      auto length = i.InputRegister(3);                                       \
      DCHECK_EQ(0, index2);                                                   \
      __ cmpl(index1, length);                                                \
      ool = new (zone()) OutOfLineLoadZero(this, result);                     \
    } else {                                                                  \
      auto length = i.InputInt32(3);                                          \
      DCHECK_LE(index2, length);                                              \
      __ cmpq(index1, Immediate(length - index2));                            \
      class OutOfLineLoadInteger FINAL : public OutOfLineCode {               \
       public:                                                                \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,             \
                             Register buffer, Register index1,                \
                             int32_t index2, int32_t length)                  \
            : OutOfLineCode(gen),                                             \
              result_(result),                                                \
              buffer_(buffer),                                                \
              index1_(index1),                                                \
              index2_(index2),                                                \
              length_(length) {}                                              \
                                                                              \
        void Generate() FINAL {                                               \
          Label oob;                                                          \
          __ leal(kScratchRegister, Operand(index1_, index2_));               \
          __ cmpl(kScratchRegister, Immediate(length_));                      \
          __ j(above_equal, &oob, Label::kNear);                              \
          __ asm_instr(result_,                                               \
                       Operand(buffer_, kScratchRegister, times_1, 0));       \
          __ jmp(exit());                                                     \
          __ bind(&oob);                                                      \
          __ xorl(result_, result_);                                          \
        }                                                                     \
                                                                              \
       private:                                                               \
        Register const result_;                                               \
        Register const buffer_;                                               \
        Register const index1_;                                               \
        int32_t const index2_;                                                \
        int32_t const length_;                                                \
      };                                                                      \
      ool = new (zone())                                                      \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
    }                                                                         \
    __ j(above_equal, ool->entry());                                          \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));           \
    __ bind(ool->exit());                                                     \
  } while (false)


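// Emits a bounds-checked float store; out-of-bounds stores are skipped
// rather than trapping.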
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() FINAL {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                 \
  do {                                                                        \
    auto buffer = i.InputRegister(0);                                         \
    auto index1 = i.InputRegister(1);                                         \
    auto index2 = i.InputInt32(2);                                            \
    if (instr->InputAt(3)->IsRegister()) {                                    \
      auto length = i.InputRegister(3);                                       \
      DCHECK_EQ(0, index2);                                                   \
      Label done;                                                             \
      __ cmpl(index1, length);                                                \
      __ j(above_equal, &done, Label::kNear);                                 \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);          \
      __ bind(&done);                                                         \
    } else {                                                                  \
      auto length = i.InputInt32(3);                                          \
      DCHECK_LE(index2, length);                                              \
      __ cmpq(index1, Immediate(length - index2));                            \
      class OutOfLineStoreInteger FINAL : public OutOfLineCode {              \
       public:                                                                \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,            \
                              Register index1, int32_t index2,                \
                              int32_t length, Value value)                    \
            : OutOfLineCode(gen),                                             \
              buffer_(buffer),                                                \
              index1_(index1),                                                \
              index2_(index2),                                                \
              length_(length),                                                \
              value_(value) {}                                                \
                                                                              \
        void Generate() FINAL {                                               \
          __ leal(kScratchRegister, Operand(index1_, index2_));               \
          __ cmpl(kScratchRegister, Immediate(length_));                      \
          __ j(above_equal, exit());                                          \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),        \
                       value_);                                               \
        }                                                                     \
                                                                              \
       private:                                                               \
        Register const buffer_;                                               \
        Register const index1_;                                               \
        int32_t const index2_;                                                \
        int32_t const length_;                                                \
        Value const value_;                                                   \
      };                                                                      \
      auto ool = new (zone())                                                 \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
      __ j(above_equal, ool->entry());                                        \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);          \
      __ bind(ool->exit());                                                   \
    }                                                                         \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // Don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchTruncateDoubleToI: {
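      // cvttsd2siq returns 0x8000000000000000 on overflow; comparing the
      // result against 1 sets the overflow flag for exactly that value and
      // routes it to the out-of-line slow path.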
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2siq(result, input);
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
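    // The division ops below use fixed registers: the dividend is sign- or
    // zero-extended into rdx:rax first, then idiv/div leaves the quotient in
    // rax and the remainder in rdx.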
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp:
      ASSEMBLE_DOUBLE_BINOP(ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_DOUBLE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_DOUBLE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_DOUBLE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_DOUBLE_BINOP(divsd);
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSECvtsd2ss:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
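      // Truncate with the 64-bit cvttsd2siq, since a uint32 value above
      // INT32_MAX would overflow a 32-bit signed truncation.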
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kSSEInt32ToFloat64:
      if (instr->InputAt(0)->IsRegister()) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kSSEUint32ToFloat64:
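      // movl zero-extends the 32-bit input to 64 bits, so the signed
      // 64-bit cvtqsi2sd below produces the correct unsigned result.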
      if (instr->InputAt(0)->IsRegister()) {
        __ movl(kScratchRegister, i.InputRegister(0));
      } else {
        __ movl(kScratchRegister, i.InputOperand(0));
      }
      __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
      break;
    case kAVXFloat64Add:
      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
      break;
    case kAVXFloat64Sub:
      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
      break;
    case kAVXFloat64Mul:
      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
      break;
    case kAVXFloat64Div:
      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
      break;
    case kX64Movsxbl:
      if (instr->addressing_mode() != kMode_None) {
        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      if (instr->addressing_mode() != kMode_None) {
        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      } else if (instr->InputAt(0)->IsRegister()) {
        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      __ AssertZeroExtended(i.OutputRegister());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          if (instr->InputAt(0)->IsRegister()) {
            __ movl(i.OutputRegister(), i.InputRegister(0));
          } else {
            __ movl(i.OutputRegister(), i.InputOperand(0));
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
        __ AssertZeroExtended(i.OutputRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq: {
      if (instr->InputAt(0)->IsRegister()) {
        __ movsxlq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ movsxlq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Lea32: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ addl(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ subl(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shll(i.OutputRegister(), Immediate(1));
          } else {
            __ leal(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shll(i.OutputRegister(), Immediate(1));
        } else if (mode == kMode_M4) {
          __ shll(i.OutputRegister(), Immediate(2));
        } else if (mode == kMode_M8) {
          __ shll(i.OutputRegister(), Immediate(3));
        } else {
          __ leal(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ leal(i.OutputRegister(), i.MemoryOperand());
      }
      __ AssertZeroExtended(i.OutputRegister());
      break;
    }
    case kX64Lea:
      __ leaq(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Dec32:
      __ decl(i.OutputRegister());
      break;
    case kX64Inc32:
      __ incl(i.OutputRegister());
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        if (instr->InputAt(0)->IsRegister()) {
          __ pushq(i.InputRegister(0));
        } else {
          __ pushq(i.InputOperand(0));
        }
      }
      break;
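    // kX64StoreWriteBarrier writes the value, computes the slot address into
    // 'index', and records the write so the GC is notified of the new pointer.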
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X64OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
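    // The kUnordered* conditions come from floating-point compares: ucomisd
    // sets the parity flag when either operand is NaN, so test parity first.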
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
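  // kCallAddress frames are C frames: push rbp and save callee-saved
  // registers by hand; JS and stub frames use the standard prologues.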
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      switch (src.type()) {
        case Constant::kInt32:
          // TODO(dcarney): don't need scratch in this case.
          __ Set(dst, src.ToInt32());
          break;
        case Constant::kInt64:
          __ Set(dst, src.ToInt64());
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else if (src.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ movl(dst, Immediate(src_const));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
      if (destination->IsDoubleRegister()) {
        __ Move(g.ToDoubleRegister(destination), src_const);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(kScratchRegister, src_const);
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8