// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/frames-x87.h"
#include "src/x87/macro-assembler-x87.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->
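// The "__" macro is the usual V8 shorthand: every "__ insn(...)" below
// expands to masm()->insn(...), so the emitted sequences read like assembly.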


// Adds X87-specific methods for decoding operands.
class X87OperandConverter : public InstructionOperandConverter {
 public:
  X87OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      return Operand(ToRegister(op));
    }
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return Operand(offset.from_stack_pointer() ? esp : ebp,
                   offset.offset() + extra);
  }

  Operand ToMaterializableOperand(int materializable_offset) {
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        Frame::FPOffsetToSlot(materializable_offset));
    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
  }

  Operand HighOperand(InstructionOperand* op) {
    DCHECK(op->IsDoubleStackSlot());
    return ToOperand(op, kPointerSize);
  }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kFloat32:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Immediate(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Immediate(constant.ToHeapObject());
      case Constant::kInt64:
        break;
      case Constant::kRpoNumber:
        return Immediate::CodeRelativeOffset(ToLabel(operand));
    }
    UNREACHABLE();
    return Immediate(-1);
  }

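  // Returns the current offset and post-increments it, so successive calls
  // consume an instruction's inputs in order.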
  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

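  // Maps a scaled addressing mode to its x86 scale factor by its distance
  // from the corresponding times_1 mode; e.g. ScaleFor(kMode_MR1, kMode_MR4)
  // yields times_4.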
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_MI: {
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(Immediate(disp));
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xor_(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadFloat final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    DCHECK(result_.code() == 0);
    USE(result_);
    if (FLAG_debug_code && FLAG_enable_slow_asserts) {
      __ VerifyX87StackDepth(1);
    }
    __ fstp(0);
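    // The two words pushed below form the bit pattern 0xffffffff7fffffff,
    // a quiet NaN, which is loaded as the result of an out-of-bounds float
    // load.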
    __ push(Immediate(0xffffffff));
    __ push(Immediate(0x7fffffff));
    __ fld_d(MemOperand(esp, 0));
    __ lea(esp, Operand(esp, kDoubleSize));
  }

 private:
  X87Register const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             X87Register input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    UNIMPLEMENTED();
    USE(result_);
    USE(input_);
  }

 private:
  Register const result_;
  X87Register const input_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (mode_ > RecordWriteMode::kValueIsMap) {
      __ CheckPageFlag(value_, scratch0_,
                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
                       exit());
    }
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         EMIT_REMEMBERED_SET, save_fp_mode);
    __ lea(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};

}  // namespace

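// The checked load/store macros below bounds-check the offset against the
// length input with a single unsigned compare: an above_equal branch covers
// both offset >= length and negative offsets reinterpreted as large unsigned
// values.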
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
  do {                                                                  \
    auto result = i.OutputDoubleRegister();                             \
    auto offset = i.InputRegister(0);                                   \
    DCHECK(result.code() == 0);                                         \
    if (instr->InputAt(1)->IsRegister()) {                              \
      __ cmp(offset, i.InputRegister(1));                               \
    } else {                                                            \
      __ cmp(offset, i.InputImmediate(1));                              \
    }                                                                   \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
    __ j(above_equal, ool->entry());                                    \
    __ fstp(0);                                                         \
    __ asm_instr(i.MemoryOperand(2));                                   \
    __ bind(ool->exit());                                               \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
  do {                                                                    \
    auto result = i.OutputRegister();                                     \
    auto offset = i.InputRegister(0);                                     \
    if (instr->InputAt(1)->IsRegister()) {                                \
      __ cmp(offset, i.InputRegister(1));                                 \
    } else {                                                              \
      __ cmp(offset, i.InputImmediate(1));                                \
    }                                                                     \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry());                                      \
    __ asm_instr(result, i.MemoryOperand(2));                             \
    __ bind(ool->exit());                                                 \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)   \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    Label done;                                   \
    DCHECK(i.InputDoubleRegister(2).code() == 0); \
    __ j(above_equal, &done, Label::kNear);       \
    __ asm_instr(i.MemoryOperand(3));             \
    __ bind(&done);                               \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
  do {                                                       \
    auto offset = i.InputRegister(0);                        \
    if (instr->InputAt(1)->IsRegister()) {                   \
      __ cmp(offset, i.InputRegister(1));                    \
    } else {                                                 \
      __ cmp(offset, i.InputImmediate(1));                   \
    }                                                        \
    Label done;                                              \
    __ j(above_equal, &done, Label::kNear);                  \
    if (instr->InputAt(2)->IsRegister()) {                   \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
    } else {                                                 \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    }                                                        \
    __ bind(&done);                                          \
  } while (false)


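// Frees stack slots that the tail callee does not need: a positive slot
// delta indicates the caller's frame holds more slots than the callee
// expects, so esp is moved past them before the jump.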
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ add(esp, Immediate(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    __ mov(ebp, MemOperand(ebp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X87OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      bool double_result =
          instr->HasOutput() && instr->Output()->IsDoubleRegister();
      if (double_result) {
        __ lea(esp, Operand(esp, -kDoubleSize));
        __ fstp_d(Operand(esp, 0));
      }
      __ fninit();
      if (double_result) {
        __ fld_d(Operand(esp, 0));
        __ lea(esp, Operand(esp, kDoubleSize));
      } else {
        __ fld1();
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObject: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      bool double_result =
          instr->HasOutput() && instr->Output()->IsDoubleRegister();
      if (double_result) {
        __ lea(esp, Operand(esp, -kDoubleSize));
        __ fstp_d(Operand(esp, 0));
      }
      __ fninit();
      if (double_result) {
        __ fld_d(Operand(esp, 0));
        __ lea(esp, Operand(esp, kDoubleSize));
      } else {
        __ fld1();
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchLazyBailout: {
      EnsureSpaceForLazyDeopt();
      RecordCallPosition(instr);
      // Lazy bailout entry: the FPU state needs to be re-initialized.
      __ fninit();
      __ fld1();
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      bool double_result =
          instr->HasOutput() && instr->Output()->IsDoubleRegister();
      if (double_result) {
        __ lea(esp, Operand(esp, -kDoubleSize));
        __ fstp_d(Operand(esp, 0));
      }
      __ fninit();
      if (double_result) {
        __ fld_d(Operand(esp, 0));
        __ lea(esp, Operand(esp, kDoubleSize));
      } else {
        __ fld1();
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      int double_register_param_count = 0;
      int x87_layout = 0;
      for (size_t i = 0; i < instr->InputCount(); i++) {
        if (instr->InputAt(i)->IsDoubleRegister()) {
          double_register_param_count++;
        }
      }
      // Currently we use only one X87 register. If double_register_param_count
      // is greater than 1, the same double register was added to this
      // instruction's inputs more than once.
      if (double_register_param_count > 0) {
        x87_layout = (0 << 3) | 1;
      }
      // The layout of the x87 register stack is pushed on top of the FPU
      // register stack for the deoptimizer: the low three bits hold the stack
      // depth and the upper bits the position of the result register (st0
      // here).
      __ push(Immediate(x87_layout));
      __ fild_s(MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kPointerSize));

      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), ebp);
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), esp);
      break;
    case kArchTruncateDoubleToI: {
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_d(i.InputOperand(0));
      }
      __ TruncateX87TOSToI(i.OutputRegister());
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
      __ mov(operand, value);
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kX87Add:
      if (HasImmediateInput(instr, 1)) {
        __ add(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ add(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87And:
      if (HasImmediateInput(instr, 1)) {
        __ and_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ and_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Cmp:
      if (HasImmediateInput(instr, 1)) {
        __ cmp(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ cmp(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Test:
      if (HasImmediateInput(instr, 1)) {
        __ test(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ test(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Imul:
      if (HasImmediateInput(instr, 1)) {
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
      } else {
        __ imul(i.OutputRegister(), i.InputOperand(1));
      }
      break;
    case kX87ImulHigh:
      __ imul(i.InputRegister(1));
      break;
    case kX87UmulHigh:
      __ mul(i.InputRegister(1));
      break;
    case kX87Idiv:
      __ cdq();
      __ idiv(i.InputOperand(1));
      break;
    case kX87Udiv:
      __ Move(edx, Immediate(0));
      __ div(i.InputOperand(1));
      break;
    case kX87Not:
      __ not_(i.OutputOperand());
      break;
    case kX87Neg:
      __ neg(i.OutputOperand());
      break;
    case kX87Or:
      if (HasImmediateInput(instr, 1)) {
        __ or_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ or_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Xor:
      if (HasImmediateInput(instr, 1)) {
        __ xor_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ xor_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Sub:
      if (HasImmediateInput(instr, 1)) {
        __ sub(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ sub(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Shl:
      if (HasImmediateInput(instr, 1)) {
        __ shl(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shl_cl(i.OutputOperand());
      }
      break;
    case kX87Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputOperand());
      }
      break;
    case kX87Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputOperand());
      }
      break;
    case kX87Ror:
      if (HasImmediateInput(instr, 1)) {
        __ ror(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ ror_cl(i.OutputOperand());
      }
      break;
    case kX87Lzcnt:
      __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kX87Popcnt:
      __ Popcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kX87LoadFloat64Constant: {
      InstructionOperand* source = instr->InputAt(0);
      InstructionOperand* destination = instr->Output();
      DCHECK(source->IsConstant());
      X87OperandConverter g(this, nullptr);
      Constant src_constant = g.ToConstant(source);

      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (destination->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ mov(MemOperand(esp, 0), Immediate(lower));
        __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
        __ fstp(0);
        __ fld_d(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        UNREACHABLE();
      }
      break;
    }
    case kX87Float32Cmp: {
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ FCmp();
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      break;
    }
    case kX87Float32Add: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
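      // Control word 0x027F masks all exceptions and selects double (53-bit)
      // precision so intermediate results match IEEE semantics; 0x037F below
      // restores the x87 default of extended (64-bit) precision. The same
      // pair brackets the other float arithmetic cases.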
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ fld_s(MemOperand(esp, kFloatSize));
      __ faddp();
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Sub: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fsubp();
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Mul: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fmulp();
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Div: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fdivp();
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Max: {
      Label check_nan_left, check_zero, return_left, return_right;
      Condition condition = below;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();

      // Return the right operand if at least one of the two operands is NaN.
      __ j(parity_even, &return_right, Label::kNear);
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

      __ fadd(1);
      __ jmp(&return_left, Label::kNear);

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      break;
    }
    case kX87Float32Min: {
      Label check_nan_left, check_zero, return_left, return_right;
      Condition condition = above;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();
      // Return the right operand if at least one of the two operands is NaN.
      __ j(parity_even, &return_right, Label::kNear);
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
      // At this point, both left and right are either 0 or -0. Spill st0 and
      // st1 to the stack, XOR their bit patterns so that a differing sign
      // yields -0, and reload the result as the left operand.
      __ push(eax);
      __ fld(1);
      __ fld(1);
      __ sub(esp, Immediate(2 * kPointerSize));
      __ fstp_s(MemOperand(esp, 0));
      __ fstp_s(MemOperand(esp, kPointerSize));
      __ pop(eax);
      __ xor_(MemOperand(esp, 0), eax);
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ pop(eax);  // Discard the remaining scratch slot.
      __ pop(eax);  // Restore eax.
      __ jmp(&return_left, Label::kNear);

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      break;
    }
    case kX87Float32Sqrt: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ fsqrt();
      __ lea(esp, Operand(esp, kFloatSize));
      break;
    }
    case kX87Float32Abs: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ fabs();
      __ lea(esp, Operand(esp, kFloatSize));
      break;
    }
    case kX87Float32Round: {
      RoundingMode mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      // Set the correct rounding mode in the x87 control register.
      __ X87SetRC((mode << 10));

      if (!instr->InputAt(0)->IsDoubleRegister()) {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_s(i.InputOperand(0));
      }
      __ frndint();
      __ X87SetRC(0x0000);
      break;
    }
    case kX87Float64Add: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, 0));
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ faddp();
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Sub: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fsub_d(MemOperand(esp, 0));
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Mul: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fmul_d(MemOperand(esp, 0));
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Div: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fdiv_d(MemOperand(esp, 0));
      // Clear the stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of the control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Mod: {
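      // The two double operands are copied into the four word-sized argument
      // slots reserved by PrepareCallCFunction(4, ...) before calling the C
      // helper that computes the floating-point modulus.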
      FrameScope frame_scope(&masm_, StackFrame::MANUAL);
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ mov(eax, esp);
      __ PrepareCallCFunction(4, eax);
      __ fstp(0);
      __ fld_d(MemOperand(eax, 0));
      __ fstp_d(Operand(esp, 1 * kDoubleSize));
      __ fld_d(MemOperand(eax, kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       4);
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Float64Max: {
      Label check_zero, return_left, return_right;
      Condition condition = below;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fld_d(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();
      __ j(parity_even, &return_right,
           Label::kNear);  // At least one NaN: return the right operand.
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Float64Min: {
      Label check_zero, return_left, return_right;
      Condition condition = above;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fld_d(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();
      __ j(parity_even, &return_right,
           Label::kNear);  // At least one NaN: return the right operand.
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Float64Abs: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_d(MemOperand(esp, 0));
      __ fabs();
      __ lea(esp, Operand(esp, kDoubleSize));
      break;
    }
    case kX87Int32ToFloat64: {
      InstructionOperand* input = instr->InputAt(0);
      DCHECK(input->IsRegister() || input->IsStackSlot());
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      if (input->IsRegister()) {
        Register input_reg = i.InputRegister(0);
        __ push(input_reg);
        __ fild_s(Operand(esp, 0));
        __ pop(input_reg);
      } else {
        __ fild_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Float32ToFloat64: {
      InstructionOperand* input = instr->InputAt(0);
      if (input->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fstp_d(MemOperand(esp, 0));
        __ fld_d(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Uint32ToFloat64: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ LoadUint32NoSSE2(i.InputRegister(0));
      break;
    }
    case kX87Float64ToInt32: {
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_d(i.InputOperand(0));
      }
      __ TruncateX87TOSToI(i.OutputRegister(0));
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kX87Float64ToFloat32: {
      InstructionOperand* input = instr->InputAt(0);
      if (input->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fstp_s(MemOperand(esp, 0));
        __ fld_s(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_d(i.InputOperand(0));
        __ sub(esp, Immediate(kDoubleSize));
        __ fstp_s(MemOperand(esp, 0));
        __ fld_s(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
      break;
    }
    case kX87Float64ToUint32: {
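      // Bias the input by INT32_MIN (pushed below) so that values in
      // [0, 2^32) fall into the signed int32 range, truncate, then add
      // 0x80000000 back to undo the bias.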
      __ push_imm32(-2147483648);
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_d(i.InputOperand(0));
      }
      __ fild_s(Operand(esp, 0));
      __ fadd(1);
      __ fstp(0);
      __ TruncateX87TOSToI(i.OutputRegister(0));
      __ add(esp, Immediate(kInt32Size));
      __ add(i.OutputRegister(), Immediate(0x80000000));
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kX87Float64ExtractHighWord32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fst_d(MemOperand(esp, 0));
        __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      }
      break;
    }
    case kX87Float64ExtractLowWord32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fst_d(MemOperand(esp, 0));
        __ mov(i.OutputRegister(), MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        __ mov(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX87Float64InsertHighWord32: {
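      // Spill the double to the stack, overwrite the requested 32-bit half,
      // and reload the patched value; the low-word case below uses the same
      // pattern.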
      __ sub(esp, Immediate(kDoubleSize));
      __ fstp_d(MemOperand(esp, 0));
      __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
      __ fld_d(MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kX87Float64InsertLowWord32: {
      __ sub(esp, Immediate(kDoubleSize));
      __ fstp_d(MemOperand(esp, 0));
      __ mov(MemOperand(esp, 0), i.InputRegister(1));
      __ fld_d(MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kX87Float64Sqrt: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, 0));
      __ fsqrt();
      __ lea(esp, Operand(esp, kDoubleSize));
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Round: {
      RoundingMode mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      // Set the correct rounding mode in the x87 control register.
      __ X87SetRC((mode << 10));

      if (!instr->InputAt(0)->IsDoubleRegister()) {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_d(i.InputOperand(0));
      }
      __ frndint();
      __ X87SetRC(0x0000);
      break;
    }
    case kX87Float64Cmp: {
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fld_d(MemOperand(esp, 0));
      __ FCmp();
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Movsxbl:
      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movzxbl:
      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_b(operand, i.InputInt8(index));
      } else {
        __ mov_b(operand, i.InputRegister(index));
      }
      break;
    }
    case kX87Movsxwl:
      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movzxwl:
      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_w(operand, i.InputInt16(index));
      } else {
        __ mov_w(operand, i.InputRegister(index));
      }
      break;
    }
    case kX87Movl:
      if (instr->HasOutput()) {
        __ mov(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ mov(operand, i.InputImmediate(index));
        } else {
          __ mov(operand, i.InputRegister(index));
        }
      }
      break;
    case kX87Movsd: {
      if (instr->HasOutput()) {
        X87Register output = i.OutputDoubleRegister();
        USE(output);
        DCHECK(output.code() == 0);
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_d(i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ fst_d(operand);
      }
      break;
    }
    case kX87Movss: {
      if (instr->HasOutput()) {
        X87Register output = i.OutputDoubleRegister();
        USE(output);
        DCHECK(output.code() == 0);
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_s(i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ fst_s(operand);
      }
      break;
    }
    case kX87BitcastFI: {
      __ mov(i.OutputRegister(), MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kFloatSize));
      break;
    }
    case kX87BitcastIF: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      if (instr->InputAt(0)->IsRegister()) {
        __ lea(esp, Operand(esp, -kFloatSize));
        __ mov(MemOperand(esp, 0), i.InputRegister(0));
        __ fld_s(MemOperand(esp, 0));
        __ lea(esp, Operand(esp, kFloatSize));
      } else {
        __ fld_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Lea: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happen to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (mode == kMode_MI) {
        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
      } else if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ add(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ sub(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shl(i.OutputRegister(), 1);
          } else {
            __ lea(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shl(i.OutputRegister(), 1);
        } else if (mode == kMode_M4) {
          __ shl(i.OutputRegister(), 2);
        } else if (mode == kMode_M8) {
          __ shl(i.OutputRegister(), 3);
        } else {
          __ lea(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ lea(i.OutputRegister(), i.MemoryOperand());
      }
      break;
    }
    case kX87Push:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
        if (allocated.representation() == MachineRepresentation::kFloat32) {
          __ sub(esp, Immediate(kDoubleSize));
          __ fst_s(Operand(esp, 0));
        } else {
          DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
          __ sub(esp, Immediate(kDoubleSize));
          __ fst_d(Operand(esp, 0));
        }
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
        auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
        if (allocated.representation() == MachineRepresentation::kFloat32) {
          __ sub(esp, Immediate(kDoubleSize));
          __ fld_s(i.InputOperand(0));
          __ fstp_s(MemOperand(esp, 0));
        } else {
          DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
          __ sub(esp, Immediate(kDoubleSize));
          __ fld_d(i.InputOperand(0));
          __ fstp_d(MemOperand(esp, 0));
        }
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (HasImmediateInput(instr, 0)) {
        __ push(i.InputImmediate(0));
        frame_access_state()->IncreaseSPDelta(1);
      } else {
        __ push(i.InputOperand(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kX87Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
    case kX87PushFloat32:
      __ lea(esp, Operand(esp, -kFloatSize));
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ fld_s(i.InputOperand(0));
        __ fstp_s(MemOperand(esp, 0));
      } else if (instr->InputAt(0)->IsDoubleRegister()) {
        __ fst_s(MemOperand(esp, 0));
      } else {
        UNREACHABLE();
      }
      break;
    case kX87PushFloat64:
      __ lea(esp, Operand(esp, -kDoubleSize));
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ fld_d(i.InputOperand(0));
        __ fstp_d(MemOperand(esp, 0));
      } else if (instr->InputAt(0)->IsDoubleRegister()) {
        __ fst_d(MemOperand(esp, 0));
      } else {
        UNREACHABLE();
      }
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
      break;
    case kX87StackCheck: {
      ExternalReference const stack_limit =
          ExternalReference::address_of_stack_limit(isolate());
      __ cmp(esp, Operand::StaticVariable(stack_limit));
      break;
    }
    case kCheckedLoadWord64:
    case kCheckedStoreWord64:
      UNREACHABLE();  // Checked int64 load/store is currently unsupported.
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X87OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
    default:
      UNREACHABLE();
      break;
  }
  // Add a jump if not falling through to the next block.
  if (!branch->fallthru) __ jmp(flabel);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X87OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
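  // For the kUnordered* conditions, parity_odd means the preceding FPU
  // compare saw no NaN, so jump ahead to the ordered check; otherwise
  // materialize the unordered answer directly and skip to done.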
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ Move(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&check);
  if (reg.is_byte_register()) {
    // setcc for byte registers (al, bl, cl, dl).
    __ setcc(cc, reg);
    __ movzx_b(reg, reg);
  } else {
    // Emit a branch to set a register to either 1 or 0.
    Label set;
    __ j(cc, &set, Label::kNear);
    __ Move(reg, Immediate(0));
    __ jmp(&done, Label::kNear);
    __ bind(&set);
    __ mov(reg, Immediate(1));
  }
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X87OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmp(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


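// Input 0 is the switch value, input 1 the default block, and inputs 2..n
// the jump-table targets in index order.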
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X87OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (size_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ cmp(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ jmp(Operand::JumpTable(input, times_4, table));
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// The calling convention for JSFunctions on X87 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, so
// the steps of the call look as follows:

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp          ^ ebp

// --{ push arguments and setup ESI, EDI }--------------------------------------
//                                       | args + receiver | caller frame |
//                                       ^ esp                            ^ ebp
//                                       [edi = JSFunction, esi = context]

// --{ call [edi + kCodeEntryOffset] }------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                  ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ esp                                       ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ ebp,esp

// --{ push esi }---------------------------------------------------------------
//                      | CTX | FP | RET | args + receiver | caller frame |
//                      ^ esp ^ ebp

// --{ push edi }---------------------------------------------------------------
//                | FNC | CTX | FP | RET | args + receiver | caller frame |
//                ^ esp       ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^ esp                      ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ esp,ebp

// --{ pop ebp }-----------------------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                  ^ ebp

// --{ ret #A+1 }-----------------------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp          ^ ebp


// Runtime function calls are accomplished by doing a stub call to the
// CEntryStub (a real code object). On X87, arguments are passed on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp          ^ ebp

// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
//                                       | args + receiver | caller frame |
//                                       ^ esp                            ^ ebp
//                                       [eax = #args, ebx = runtime function,
//                                        esi = context]

// --{ call #CEntryStub }-------------------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                  ^ ebp

// =={ body of runtime function }===============================================

// --{ runtime returns }--------------------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp          ^ ebp

// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp          ^ ebp

// --{ set up arguments in registers on stack }---------------------------------
//                                                  | args | caller frame |
//                                                  ^ esp                 ^ ebp
//                                                  [r0 = arg0, r1 = arg1, ...]

// --{ call code }--------------------------------------------------------------
//                                            | RET | args | caller frame |
//                                            ^ esp                       ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                                       | FP | RET | args | caller frame |
//                                       ^ esp                            ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                                       | FP | RET | args | caller frame |
//                                       ^ ebp,esp

// --{ save registers }---------------------------------------------------------
//                                | regs | FP | RET | args | caller frame |
//                                ^ esp  ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
//                 | callee frame | regs | FP | RET | args | caller frame |
//                 ^ esp                 ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ restore registers }------------------------------------------------------
//                                | regs | FP | RET | args | caller frame |
//                                ^ esp  ^ ebp

// --{ mov esp, ebp }-----------------------------------------------------------
//                                       | FP | RET | args | caller frame |
//                                       ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
//                                            | RET | args | caller frame |
//                                            ^ esp                       ^ ebp

1792void CodeGenerator::AssemblePrologue() {
1793 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1794 if (descriptor->IsCFunctionCall()) {
1795 // Assemble a prologue similar the to cdecl calling convention.
1796 __ push(ebp);
1797 __ mov(ebp, esp);
1798 } else if (descriptor->IsJSFunctionCall()) {
1799 // TODO(turbofan): this prologue is redundant with OSR, but needed for
1800 // code aging.
1801 __ Prologue(this->info()->GeneratePreagedPrologue());
1802 } else if (frame()->needs_frame()) {
1803 __ StubPrologue();
1804 } else {
1805 frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
1806 }
1807 frame_access_state()->SetFrameAccessToDefault();
1808
1809 int stack_shrink_slots = frame()->GetSpillSlotCount();
1810 if (info()->is_osr()) {
1811 // TurboFan OSR-compiled functions cannot be entered directly.
1812 __ Abort(kShouldNotDirectlyEnterOsrFunction);
1813
1814 // Unoptimized code jumps directly to this entrypoint while the unoptimized
1815 // frame is still on the stack. Optimized code uses OSR values directly from
1816 // the unoptimized frame. Thus, all that needs to be done is to allocate the
1817 // remaining stack slots.
1818 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
1819 osr_pc_offset_ = __ pc_offset();
1820 // TODO(titzer): cannot address target function == local #-1
1821 __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
1822 stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
1823 }
1824
1825 const RegList saves = descriptor->CalleeSavedRegisters();
  if (stack_shrink_slots > 0) {
    __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
  }

  if (saves != 0) {  // Save callee-saved registers.
    DCHECK(!info()->is_osr());
    int pushed = 0;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ push(Register::from_code(i));
      ++pushed;
    }
    frame()->AllocateSavedCalleeRegisterSlots(pushed);
  }

  // Initialize FPU state.
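  // fninit resets the FPU and clears its register stack; fld1 then pushes 1.0
  // so that exactly one value lives on the x87 stack from here on (the
  // depth-1 invariant checked by VerifyX87StackDepth(1) in AssembleReturn).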
  __ fninit();
  __ fld1();
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Pop the x87 stack only if no floating-point return value lives on it.
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ VerifyX87StackDepth(1);
  }
  bool clear_stack = true;
  for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
    MachineRepresentation rep = descriptor->GetReturnType(i).representation();
    LinkageLocation loc = descriptor->GetReturnLocation(i);
    if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
      clear_stack = false;
      break;
    }
  }
  if (clear_stack) __ fstp(0);

  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  const RegList saves = descriptor->CalleeSavedRegisters();
  // Restore callee-saved registers.
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ pop(Register::from_code(i));
    }
  }

  if (descriptor->IsCFunctionCall()) {
    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
    __ pop(ebp);       // Pop caller's frame pointer.
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
      __ pop(ebp);       // Pop caller's frame pointer.
    }
  }
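  // Drop any stack parameters on return. MacroAssembler::Ret(bytes, scratch)
  // emits a plain `ret n` when the byte count fits in 16 bits; otherwise it
  // shuffles the return address through the scratch register (here ebx) to
  // free the argument space.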
  if (pop_count == 0) {
    __ ret(0);
  } else {
    __ Ret(pop_count * kPointerSize, ebx);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X87OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
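      // There is no memory-to-memory mov on IA-32, so bounce the value
      // through the stack with a push/pop pair.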
      Operand dst = g.ToOperand(destination);
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      int offset;
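      // A few heap constants (for example the current context or the closure
      // itself) already live in well-known frame slots; reloading them from
      // the frame is cheaper than embedding the handle in the code.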
      if (IsMaterializableFromFrame(src, &offset)) {
        if (destination->IsRegister()) {
          Register dst = g.ToRegister(destination);
          __ mov(dst, g.ToMaterializableOperand(offset));
        } else {
          DCHECK(destination->IsStackSlot());
          Operand dst = g.ToOperand(destination);
          __ push(g.ToMaterializableOperand(offset));
          __ pop(dst);
        }
      } else if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        DCHECK(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
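        // New-space objects may move during GC and are referenced indirectly,
        // so they cannot be embedded as an immediate in a store to memory;
        // materialize them on the stack and pop into the slot instead.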
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (src_constant.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ sub(esp, Immediate(kInt32Size));
        __ mov(MemOperand(esp, 0), Immediate(src));
        // Keep the x87 stack at depth one: drop the old top before pushing.
        __ fstp(0);
        __ fld_s(MemOperand(esp, 0));
        __ add(esp, Immediate(kInt32Size));
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ Move(dst, Immediate(src));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (destination->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ mov(MemOperand(esp, 0), Immediate(lower));
        __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
        // Keep the x87 stack at depth one: drop the old top before pushing.
        __ fstp(0);
        __ fld_d(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ Move(dst0, Immediate(lower));
        __ Move(dst1, Immediate(upper));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DCHECK(destination->IsDoubleStackSlot());
    Operand dst = g.ToOperand(destination);
    auto allocated = AllocatedOperand::cast(*source);
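    // fst (as opposed to fstp) stores a copy without popping, so the value
    // stays live in the x87 register after the move.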
    switch (allocated.representation()) {
      case MachineRepresentation::kFloat32:
        __ fst_s(dst);
        break;
      case MachineRepresentation::kFloat64:
        __ fst_d(dst);
        break;
      default:
        UNREACHABLE();
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    auto allocated = AllocatedOperand::cast(*source);
    if (destination->IsDoubleRegister()) {
      // Keep the x87 stack at depth one: drop the old top before pushing.
      __ fstp(0);
      switch (allocated.representation()) {
        case MachineRepresentation::kFloat32:
          __ fld_s(src);
          break;
        case MachineRepresentation::kFloat64:
          __ fld_d(src);
          break;
        default:
          UNREACHABLE();
      }
    } else {
      Operand dst = g.ToOperand(destination);
      switch (allocated.representation()) {
        case MachineRepresentation::kFloat32:
          __ fld_s(src);
          __ fstp_s(dst);
          break;
        case MachineRepresentation::kFloat64:
          __ fld_d(src);
          __ fstp_d(dst);
          break;
        default:
          UNREACHABLE();
      }
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X87OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ xchg(dst, src);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    __ xchg(g.ToRegister(source), g.ToOperand(destination));
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
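    // Swap the two slots via the stack: push both values, then pop each into
    // the other slot. The explicit SP delta keeps frame-relative operand
    // offsets correct while the extra word sits on the stack.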
    Operand dst1 = g.ToOperand(destination);
    __ push(dst1);
    frame_access_state()->IncreaseSPDelta(1);
    Operand src1 = g.ToOperand(source);
    __ push(src1);
    Operand dst2 = g.ToOperand(destination);
    __ pop(dst2);
    frame_access_state()->IncreaseSPDelta(-1);
    Operand src2 = g.ToOperand(source);
    __ pop(src2);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    UNREACHABLE();
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
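    // Load the slot value on top of the register value, exchange the two x87
    // stack entries, then store the old register value back to the slot; the
    // loaded value remains in st(0).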
    auto allocated = AllocatedOperand::cast(*source);
    switch (allocated.representation()) {
      case MachineRepresentation::kFloat32:
        __ fld_s(g.ToOperand(destination));
        __ fxch();
        __ fstp_s(g.ToOperand(destination));
        break;
      case MachineRepresentation::kFloat64:
        __ fld_d(g.ToOperand(destination));
        __ fxch();
        __ fstp_d(g.ToOperand(destination));
        break;
      default:
        UNREACHABLE();
    }
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
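    // Load both slot values onto the x87 stack, then store them back
    // crosswise: st(0) (the destination value) goes to the source slot first.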
    auto allocated = AllocatedOperand::cast(*source);
    switch (allocated.representation()) {
      case MachineRepresentation::kFloat32:
        __ fld_s(g.ToOperand(source));
        __ fld_s(g.ToOperand(destination));
        __ fstp_s(g.ToOperand(source));
        __ fstp_s(g.ToOperand(destination));
        break;
      case MachineRepresentation::kFloat64:
        __ fld_d(g.ToOperand(source));
        __ fld_d(g.ToOperand(destination));
        __ fstp_d(g.ToOperand(source));
        __ fstp_d(g.ToOperand(destination));
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
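  // Each entry is emitted as the 32-bit address of its target label; unbound
  // labels are fixed up once they are bound.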
  for (size_t index = 0; index < target_count; ++index) {
    __ dd(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8