// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frames.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/frames-x87.h"
#include "src/x87/macro-assembler-x87.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds X87-specific methods for decoding operands.
class X87OperandConverter : public InstructionOperandConverter {
 public:
  X87OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      return Operand(ToRegister(op));
    }
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return Operand(offset.from_stack_pointer() ? esp : ebp,
                   offset.offset() + extra);
  }

  Operand ToMaterializableOperand(int materializable_offset) {
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        FPOffsetToFrameSlot(materializable_offset));
    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
  }

  Operand HighOperand(InstructionOperand* op) {
    DCHECK(op->IsDoubleStackSlot());
    return ToOperand(op, kPointerSize);
  }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kFloat32:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Immediate(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Immediate(constant.ToHeapObject());
      case Constant::kInt64:
        break;
      case Constant::kRpoNumber:
        return Immediate::CodeRelativeOffset(ToLabel(operand));
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_MI: {
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(Immediate(disp));
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};
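
// Note on addressing-mode decoding (illustrative): MemoryOperand() consumes
// the instruction's inputs in the order implied by the decoded mode. For
// example, an instruction encoded with kMode_MR4I and inputs [ebx, ecx, 8]
// decodes to Operand(ebx, ecx, times_4, 8), i.e. the address ebx + ecx*4 + 8.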


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xor_(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadFloat final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    DCHECK(result_.code() == 0);
    USE(result_);
    if (FLAG_debug_code && FLAG_enable_slow_asserts) {
      __ VerifyX87StackDepth(1);
    }
    __ fstp(0);
    __ push(Immediate(0xffffffff));
    __ push(Immediate(0x7fffffff));
    __ fld_d(MemOperand(esp, 0));
    __ lea(esp, Operand(esp, kDoubleSize));
  }

 private:
  X87Register const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             X87Register input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    UNIMPLEMENTED();
    USE(result_);
    USE(input_);
  }

 private:
  Register const result_;
  X87Register const input_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ lea(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};
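
// The write-barrier fast path is emitted inline at the use site (see
// kArchStoreWithWriteBarrier below): only when the object's page is marked
// "pointers from here are interesting" does control enter this out-of-line
// stub, which additionally filters Smi values and uninteresting target pages
// before calling the RecordWriteStub.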

}  // namespace


#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
  do {                                                                  \
    auto result = i.OutputDoubleRegister();                             \
    auto offset = i.InputRegister(0);                                   \
    DCHECK(result.code() == 0);                                         \
    if (instr->InputAt(1)->IsRegister()) {                              \
      __ cmp(offset, i.InputRegister(1));                               \
    } else {                                                            \
      __ cmp(offset, i.InputImmediate(1));                              \
    }                                                                   \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
    __ j(above_equal, ool->entry());                                    \
    __ fstp(0);                                                         \
    __ asm_instr(i.MemoryOperand(2));                                   \
    __ bind(ool->exit());                                               \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
  do {                                                                    \
    auto result = i.OutputRegister();                                     \
    auto offset = i.InputRegister(0);                                     \
    if (instr->InputAt(1)->IsRegister()) {                                \
      __ cmp(offset, i.InputRegister(1));                                 \
    } else {                                                              \
      __ cmp(offset, i.InputImmediate(1));                                \
    }                                                                     \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry());                                      \
    __ asm_instr(result, i.MemoryOperand(2));                             \
    __ bind(ool->exit());                                                 \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)   \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    Label done;                                   \
    DCHECK(i.InputDoubleRegister(2).code() == 0); \
    __ j(above_equal, &done, Label::kNear);       \
    __ asm_instr(i.MemoryOperand(3));             \
    __ bind(&done);                               \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
  do {                                                       \
    auto offset = i.InputRegister(0);                        \
    if (instr->InputAt(1)->IsRegister()) {                   \
      __ cmp(offset, i.InputRegister(1));                    \
    } else {                                                 \
      __ cmp(offset, i.InputImmediate(1));                   \
    }                                                        \
    Label done;                                              \
    __ j(above_equal, &done, Label::kNear);                  \
    if (instr->InputAt(2)->IsRegister()) {                   \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
    } else {                                                 \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    }                                                        \
    __ bind(&done);                                          \
  } while (false)
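
// The checked load/store macros above implement bounds-checked accesses: the
// offset (input 0) is compared against the length (input 1), and out-of-bounds
// loads branch to an out-of-line block that yields 0 for integers or a NaN for
// floats, while out-of-bounds stores are simply skipped. Roughly, the
// integer-load macro expands to:
//   cmp offset, length
//   j above_equal -> ool      (ool: result = 0)
//   asm_instr result, [memory operand]
//   bind ool->exit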


void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ add(esp, Immediate(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    __ mov(ebp, MemOperand(ebp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}
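
// A positive sp_slot_delta means the tail-called frame needs fewer stack
// parameter slots than the current one, so esp is bumped up before the jump;
// a negative delta reserves the extra slots instead. When a frame exists,
// ebp is reloaded from the saved frame pointer (mov ebp, [ebp]) so that the
// tail callee sees the caller's frame pointer.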


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X87OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      bool double_result =
          instr->HasOutput() && instr->Output()->IsDoubleRegister();
      if (double_result) {
        __ lea(esp, Operand(esp, -kDoubleSize));
        __ fstp_d(Operand(esp, 0));
      }
      __ fninit();
      if (double_result) {
        __ fld_d(Operand(esp, 0));
        __ lea(esp, Operand(esp, kDoubleSize));
      } else {
        __ fld1();
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
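    // Calls clobber the whole x87 stack, so after each call the code
    // generator re-establishes its invariant of exactly one value on the FPU
    // stack: fninit resets the FPU, and either the double result (spilled
    // around the fninit) or a dummy 1.0 (fld1) is pushed back. The same
    // pattern recurs in kArchCallJSFunction and kArchCallCFunction below.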
    case kArchTailCallCodeObject: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      bool double_result =
          instr->HasOutput() && instr->Output()->IsDoubleRegister();
      if (double_result) {
        __ lea(esp, Operand(esp, -kDoubleSize));
        __ fstp_d(Operand(esp, 0));
      }
      __ fninit();
      if (double_result) {
        __ fld_d(Operand(esp, 0));
        __ lea(esp, Operand(esp, kDoubleSize));
      } else {
        __ fld1();
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      bool double_result =
          instr->HasOutput() && instr->Output()->IsDoubleRegister();
      if (double_result) {
        __ lea(esp, Operand(esp, -kDoubleSize));
        __ fstp_d(Operand(esp, 0));
      }
      __ fninit();
      if (double_result) {
        __ fld_d(Operand(esp, 0));
        __ lea(esp, Operand(esp, kDoubleSize));
      } else {
        __ fld1();
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      int double_register_param_count = 0;
      int x87_layout = 0;
      for (size_t i = 0; i < instr->InputCount(); i++) {
        if (instr->InputAt(i)->IsDoubleRegister()) {
          double_register_param_count++;
        }
      }
      // Currently we use only one X87 register. If double_register_param_count
      // is bigger than 1, a duplicated double register has been added to the
      // inputs of this instruction.
      if (double_register_param_count > 0) {
        x87_layout = (0 << 3) | 1;
      }
      // The layout of the x87 register stack is pushed on top of the FPU
      // register stack for deoptimization.
      __ push(Immediate(x87_layout));
      __ fild_s(MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kPointerSize));

      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
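    // Illustrative: the x87_layout word packs the FPU-stack description for
    // the deoptimizer, with the live-register count in the low three bits and
    // the stack positions above them, so (0 << 3) | 1 reads as "one live
    // value, at position 0". Loading it via fild_s leaves the layout word on
    // top of the FPU stack, where the deoptimizer expects it.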
    case kArchRet:
      AssembleReturn();
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), ebp);
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), esp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->frame()->needs_frame()) {
        __ mov(i.OutputRegister(), Operand(ebp, 0));
      } else {
        __ mov(i.OutputRegister(), ebp);
      }
      break;
    case kArchTruncateDoubleToI: {
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_d(i.InputOperand(0));
      }
      __ TruncateX87TOSToI(i.OutputRegister());
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
      __ mov(operand, value);
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = esp;
      } else {
        base = ebp;
      }
      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
      break;
    }
    case kX87Add:
      if (HasImmediateInput(instr, 1)) {
        __ add(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ add(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87And:
      if (HasImmediateInput(instr, 1)) {
        __ and_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ and_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Cmp:
      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ cmp(operand, i.InputImmediate(index));
        } else {
          __ cmp(operand, i.InputRegister(index));
        }
      } else {
        if (HasImmediateInput(instr, 1)) {
          __ cmp(i.InputOperand(0), i.InputImmediate(1));
        } else {
          __ cmp(i.InputRegister(0), i.InputOperand(1));
        }
      }
      break;
    case kX87Test:
      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ test(operand, i.InputImmediate(index));
        } else {
          __ test(i.InputRegister(index), operand);
        }
      } else {
        if (HasImmediateInput(instr, 1)) {
          __ test(i.InputOperand(0), i.InputImmediate(1));
        } else {
          __ test(i.InputRegister(0), i.InputOperand(1));
        }
      }
      break;
    case kX87Imul:
      if (HasImmediateInput(instr, 1)) {
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
      } else {
        __ imul(i.OutputRegister(), i.InputOperand(1));
      }
      break;
    case kX87ImulHigh:
      __ imul(i.InputRegister(1));
      break;
    case kX87UmulHigh:
      __ mul(i.InputRegister(1));
      break;
    case kX87Idiv:
      __ cdq();
      __ idiv(i.InputOperand(1));
      break;
    case kX87Udiv:
      __ Move(edx, Immediate(0));
      __ div(i.InputOperand(1));
      break;
    case kX87Not:
      __ not_(i.OutputOperand());
      break;
    case kX87Neg:
      __ neg(i.OutputOperand());
      break;
    case kX87Or:
      if (HasImmediateInput(instr, 1)) {
        __ or_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ or_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Xor:
      if (HasImmediateInput(instr, 1)) {
        __ xor_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ xor_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Sub:
      if (HasImmediateInput(instr, 1)) {
        __ sub(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ sub(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kX87Shl:
      if (HasImmediateInput(instr, 1)) {
        __ shl(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shl_cl(i.OutputOperand());
      }
      break;
    case kX87Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputOperand());
      }
      break;
    case kX87Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputOperand());
      }
      break;
    case kX87Ror:
      if (HasImmediateInput(instr, 1)) {
        __ ror(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ ror_cl(i.OutputOperand());
      }
      break;
    case kX87Lzcnt:
      __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kX87Popcnt:
      __ Popcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kX87LoadFloat64Constant: {
      InstructionOperand* source = instr->InputAt(0);
      InstructionOperand* destination = instr->Output();
      DCHECK(source->IsConstant());
      X87OperandConverter g(this, nullptr);
      Constant src_constant = g.ToConstant(source);

      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (destination->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ mov(MemOperand(esp, 0), Immediate(lower));
        __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
        __ fstp(0);
        __ fld_d(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        UNREACHABLE();
      }
      break;
    }
    case kX87Float32Cmp: {
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ FCmp();
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      break;
    }
    case kX87Float32Add: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ fld_s(MemOperand(esp, kFloatSize));
      __ faddp();
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Sub: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fsubp();
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Mul: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fmulp();
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Div: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fdivp();
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float32Max: {
      Label check_nan_left, check_zero, return_left, return_right;
      Condition condition = below;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();

      // At least one NaN.
      // Return the second operand if one of the two operands is NaN.
      __ j(parity_even, &return_right, Label::kNear);
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

      __ fadd(1);
      __ jmp(&return_left, Label::kNear);

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      break;
    }
    case kX87Float32Min: {
      Label check_nan_left, check_zero, return_left, return_right;
      Condition condition = above;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, kFloatSize));
      __ fld_s(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();
      // At least one NaN.
      // Return the second operand if one of the two operands is NaN.
      __ j(parity_even, &return_right, Label::kNear);
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
      // At this point, both left and right are either +0 or -0. Push st0 and
      // st1 to the stack, pop one into a temp register, and OR the two bit
      // patterns so that the result is -0 if either input is -0; then load
      // the combined value as the result.
      __ push(eax);
      __ fld(1);
      __ fld(1);
      __ sub(esp, Immediate(2 * kPointerSize));
      __ fstp_s(MemOperand(esp, 0));
      __ fstp_s(MemOperand(esp, kPointerSize));
      __ pop(eax);
      __ or_(MemOperand(esp, 0), eax);
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ pop(eax);  // restore esp
      __ pop(eax);  // restore eax
      __ jmp(&return_left, Label::kNear);


      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kFloatSize));
      break;
    }
    case kX87Float32Sqrt: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ fsqrt();
      __ lea(esp, Operand(esp, kFloatSize));
      break;
    }
    case kX87Float32Abs: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_s(MemOperand(esp, 0));
      __ fabs();
      __ lea(esp, Operand(esp, kFloatSize));
      break;
    }
    case kX87Float32Round: {
      RoundingMode mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      // Set the correct rounding mode in the x87 control register.
      __ X87SetRC((mode << 10));

      if (!instr->InputAt(0)->IsDoubleRegister()) {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_s(i.InputOperand(0));
      }
      __ frndint();
      __ X87SetRC(0x0000);
      break;
    }
    case kX87Float64Add: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, 0));
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ faddp();
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Sub: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fsub_d(MemOperand(esp, 0));
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Mul: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fmul_d(MemOperand(esp, 0));
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Div: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fdiv_d(MemOperand(esp, 0));
      // Clear stack.
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      // Restore the default value of control word.
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Mod: {
      FrameScope frame_scope(&masm_, StackFrame::MANUAL);
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ mov(eax, esp);
      __ PrepareCallCFunction(4, eax);
      __ fstp(0);
      __ fld_d(MemOperand(eax, 0));
      __ fstp_d(Operand(esp, 1 * kDoubleSize));
      __ fld_d(MemOperand(eax, kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       4);
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
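    // Float64Mod goes through a C call rather than an x87 fprem loop: the two
    // operands are copied from the value stack (addressed via eax, which is
    // saved before the stack is aligned) into the four-word outgoing argument
    // area, and mod_two_doubles_operation leaves the result on the FPU stack.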
    case kX87Float64Max: {
      Label check_zero, return_left, return_right;
      Condition condition = below;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fld_d(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();
      __ j(parity_even, &return_right,
           Label::kNear);  // At least one NaN, return right.
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Float64Min: {
      Label check_zero, return_left, return_right;
      Condition condition = above;
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fld_d(MemOperand(esp, 0));
      __ fld(1);
      __ fld(1);
      __ FCmp();
      __ j(parity_even, &return_right,
           Label::kNear);  // At least one NaN, return right value.
      __ j(equal, &check_zero, Label::kNear);  // left == right.
      __ j(condition, &return_left, Label::kNear);
      __ jmp(&return_right, Label::kNear);

      __ bind(&check_zero);
      __ fld(0);
      __ fldz();
      __ FCmp();
      __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.

      __ bind(&return_right);
      __ fxch();

      __ bind(&return_left);
      __ fstp(0);
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Float64Abs: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ fld_d(MemOperand(esp, 0));
      __ fabs();
      __ lea(esp, Operand(esp, kDoubleSize));
      break;
    }
    case kX87Int32ToFloat32: {
      InstructionOperand* input = instr->InputAt(0);
      DCHECK(input->IsRegister() || input->IsStackSlot());
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      if (input->IsRegister()) {
        Register input_reg = i.InputRegister(0);
        __ push(input_reg);
        __ fild_s(Operand(esp, 0));
        __ pop(input_reg);
      } else {
        __ fild_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Uint32ToFloat32: {
      InstructionOperand* input = instr->InputAt(0);
      DCHECK(input->IsRegister() || input->IsStackSlot());
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      Label msb_set_src;
      Label jmp_return;
      // Put the input integer into eax (temporarily).
      __ push(eax);
      if (input->IsRegister())
        __ mov(eax, i.InputRegister(0));
      else
        __ mov(eax, i.InputOperand(0));

      __ test(eax, eax);
      __ j(sign, &msb_set_src, Label::kNear);
      __ push(eax);
      __ fild_s(Operand(esp, 0));
      __ pop(eax);

      __ jmp(&jmp_return, Label::kNear);
      __ bind(&msb_set_src);
      // Need another temp reg
      __ push(ebx);
      __ mov(ebx, eax);
      __ shr(eax, 1);
      // Recover the least significant bit to avoid rounding errors.
      __ and_(ebx, Immediate(1));
      __ or_(eax, ebx);
      __ push(eax);
      __ fild_s(Operand(esp, 0));
      __ pop(eax);
      __ fld(0);
      __ faddp();
      // Restore ebx.
      __ pop(ebx);
      __ bind(&jmp_return);
      // Restore eax.
      __ pop(eax);
      break;
    }
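    // Unsigned inputs with the sign bit set do not fit fild_s's signed range,
    // so the value is halved (with the dropped low bit OR'ed back in to keep
    // the rounding correct) and then doubled on the FPU stack. Illustrative:
    // 0xffffffff -> (0x7fffffff | 1), fild_s gives 2147483647.0, doubling
    // gives 4294967294.0, which rounds to the same float32 (4294967296.0f)
    // as the exact value 4294967295.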
    case kX87Int32ToFloat64: {
      InstructionOperand* input = instr->InputAt(0);
      DCHECK(input->IsRegister() || input->IsStackSlot());
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      if (input->IsRegister()) {
        Register input_reg = i.InputRegister(0);
        __ push(input_reg);
        __ fild_s(Operand(esp, 0));
        __ pop(input_reg);
      } else {
        __ fild_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Float32ToFloat64: {
      InstructionOperand* input = instr->InputAt(0);
      if (input->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fstp_d(MemOperand(esp, 0));
        __ fld_d(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Uint32ToFloat64: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      __ LoadUint32NoSSE2(i.InputRegister(0));
      break;
    }
    case kX87Float32ToInt32: {
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_s(i.InputOperand(0));
      }
      __ TruncateX87TOSToI(i.OutputRegister(0));
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kX87Float32ToUint32: {
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_s(i.InputOperand(0));
      }
      Label success;
      __ TruncateX87TOSToI(i.OutputRegister(0));
      __ test(i.OutputRegister(0), i.OutputRegister(0));
      __ j(positive, &success);
      __ push(Immediate(INT32_MIN));
      __ fild_s(Operand(esp, 0));
      __ lea(esp, Operand(esp, kPointerSize));
      __ faddp();
      __ TruncateX87TOSToI(i.OutputRegister(0));
      __ or_(i.OutputRegister(0), Immediate(0x80000000));
      __ bind(&success);
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kX87Float64ToInt32: {
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_d(i.InputOperand(0));
      }
      __ TruncateX87TOSToI(i.OutputRegister(0));
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
    case kX87Float64ToFloat32: {
      InstructionOperand* input = instr->InputAt(0);
      if (input->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fstp_s(MemOperand(esp, 0));
        __ fld_s(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_d(i.InputOperand(0));
        __ sub(esp, Immediate(kDoubleSize));
        __ fstp_s(MemOperand(esp, 0));
        __ fld_s(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
      break;
    }
    case kX87Float64ToUint32: {
      __ push_imm32(-2147483648);
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fld_d(i.InputOperand(0));
      }
      __ fild_s(Operand(esp, 0));
      __ fadd(1);
      __ fstp(0);
      __ TruncateX87TOSToI(i.OutputRegister(0));
      __ add(esp, Immediate(kInt32Size));
      __ add(i.OutputRegister(), Immediate(0x80000000));
      if (!instr->InputAt(0)->IsDoubleRegister()) {
        __ fstp(0);
      }
      break;
    }
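    // Float64ToUint32 biases the input into signed range: -2^31 is added
    // before truncation and 0x80000000 is added back to the integer result.
    // Illustrative: 3000000000.0 - 2147483648 = 852516352.0, which truncates
    // to 852516352, and 852516352 + 0x80000000 = 3000000000.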
    case kX87Float64ExtractHighWord32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fst_d(MemOperand(esp, 0));
        __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      }
      break;
    }
    case kX87Float64ExtractLowWord32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ fst_d(MemOperand(esp, 0));
        __ mov(i.OutputRegister(), MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        __ mov(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kX87Float64InsertHighWord32: {
      __ sub(esp, Immediate(kDoubleSize));
      __ fstp_d(MemOperand(esp, 0));
      __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
      __ fld_d(MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kX87Float64InsertLowWord32: {
      __ sub(esp, Immediate(kDoubleSize));
      __ fstp_d(MemOperand(esp, 0));
      __ mov(MemOperand(esp, 0), i.InputRegister(1));
      __ fld_d(MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kX87Float64Sqrt: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ X87SetFPUCW(0x027F);
      __ fstp(0);
      __ fld_d(MemOperand(esp, 0));
      __ fsqrt();
      __ lea(esp, Operand(esp, kDoubleSize));
      __ X87SetFPUCW(0x037F);
      break;
    }
    case kX87Float64Round: {
      RoundingMode mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      // Set the correct rounding mode in the x87 control register.
      __ X87SetRC((mode << 10));

      if (!instr->InputAt(0)->IsDoubleRegister()) {
        InstructionOperand* input = instr->InputAt(0);
        USE(input);
        DCHECK(input->IsDoubleStackSlot());
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_d(i.InputOperand(0));
      }
      __ frndint();
      __ X87SetRC(0x0000);
      break;
    }
    case kX87Float64Cmp: {
      __ fld_d(MemOperand(esp, kDoubleSize));
      __ fld_d(MemOperand(esp, 0));
      __ FCmp();
      __ lea(esp, Operand(esp, 2 * kDoubleSize));
      break;
    }
    case kX87Movsxbl:
      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movzxbl:
      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_b(operand, i.InputInt8(index));
      } else {
        __ mov_b(operand, i.InputRegister(index));
      }
      break;
    }
    case kX87Movsxwl:
      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movzxwl:
      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX87Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_w(operand, i.InputInt16(index));
      } else {
        __ mov_w(operand, i.InputRegister(index));
      }
      break;
    }
    case kX87Movl:
      if (instr->HasOutput()) {
        __ mov(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ mov(operand, i.InputImmediate(index));
        } else {
          __ mov(operand, i.InputRegister(index));
        }
      }
      break;
    case kX87Movsd: {
      if (instr->HasOutput()) {
        X87Register output = i.OutputDoubleRegister();
        USE(output);
        DCHECK(output.code() == 0);
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_d(i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ fst_d(operand);
      }
      break;
    }
    case kX87Movss: {
      if (instr->HasOutput()) {
        X87Register output = i.OutputDoubleRegister();
        USE(output);
        DCHECK(output.code() == 0);
        if (FLAG_debug_code && FLAG_enable_slow_asserts) {
          __ VerifyX87StackDepth(1);
        }
        __ fstp(0);
        __ fld_s(i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ fst_s(operand);
      }
      break;
    }
    case kX87BitcastFI: {
      __ mov(i.OutputRegister(), MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kFloatSize));
      break;
    }
    case kX87BitcastIF: {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(1);
      }
      __ fstp(0);
      if (instr->InputAt(0)->IsRegister()) {
        __ lea(esp, Operand(esp, -kFloatSize));
        __ mov(MemOperand(esp, 0), i.InputRegister(0));
        __ fld_s(MemOperand(esp, 0));
        __ lea(esp, Operand(esp, kFloatSize));
      } else {
        __ fld_s(i.InputOperand(0));
      }
      break;
    }
    case kX87Lea: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (mode == kMode_MI) {
        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
      } else if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ add(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ sub(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shl(i.OutputRegister(), 1);
          } else {
            __ lea(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shl(i.OutputRegister(), 1);
        } else if (mode == kMode_M4) {
          __ shl(i.OutputRegister(), 2);
        } else if (mode == kMode_M8) {
          __ shl(i.OutputRegister(), 3);
        } else {
          __ lea(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ lea(i.OutputRegister(), i.MemoryOperand());
      }
      break;
    }
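    // Illustrative strength reductions performed above when the output
    // aliases input 0: kMode_MRI with displacement +4 becomes "add reg, 4",
    // kMode_M4 becomes "shl reg, 2", and kMode_MR1 with both inputs equal
    // becomes "shl reg, 1"; everything else falls back to a plain lea.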
    case kX87Push:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
        if (allocated.representation() == MachineRepresentation::kFloat32) {
          __ sub(esp, Immediate(kDoubleSize));
          __ fst_s(Operand(esp, 0));
        } else {
          DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
          __ sub(esp, Immediate(kDoubleSize));
          __ fst_d(Operand(esp, 0));
        }
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
        auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
        if (allocated.representation() == MachineRepresentation::kFloat32) {
          __ sub(esp, Immediate(kDoubleSize));
          __ fld_s(i.InputOperand(0));
          __ fstp_s(MemOperand(esp, 0));
        } else {
          DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
          __ sub(esp, Immediate(kDoubleSize));
          __ fld_d(i.InputOperand(0));
          __ fstp_d(MemOperand(esp, 0));
        }
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (HasImmediateInput(instr, 0)) {
        __ push(i.InputImmediate(0));
        frame_access_state()->IncreaseSPDelta(1);
      } else {
        __ push(i.InputOperand(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kX87Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
    case kX87PushFloat32:
      __ lea(esp, Operand(esp, -kFloatSize));
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ fld_s(i.InputOperand(0));
        __ fstp_s(MemOperand(esp, 0));
      } else if (instr->InputAt(0)->IsDoubleRegister()) {
        __ fst_s(MemOperand(esp, 0));
      } else {
        UNREACHABLE();
      }
      break;
    case kX87PushFloat64:
      __ lea(esp, Operand(esp, -kDoubleSize));
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ fld_d(i.InputOperand(0));
        __ fstp_d(MemOperand(esp, 0));
      } else if (instr->InputAt(0)->IsDoubleRegister()) {
        __ fst_d(MemOperand(esp, 0));
      } else {
        UNREACHABLE();
      }
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
      break;
    case kX87StackCheck: {
      ExternalReference const stack_limit =
          ExternalReference::address_of_stack_limit(isolate());
      __ cmp(esp, Operand::StaticVariable(stack_limit));
      break;
    }
    case kCheckedLoadWord64:
    case kCheckedStoreWord64:
      UNREACHABLE();  // currently unsupported checked int64 load/store.
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  X87OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
    default:
      UNREACHABLE();
      break;
  }
  // Add a jump if not falling through to the next block.
  if (!branch->fallthru) __ jmp(flabel);
}
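
// FCmp leaves the comparison result in EFLAGS with the parity flag set for
// unordered (NaN) operands, which is why kUnorderedEqual first routes
// parity_even to the false label and kUnorderedNotEqual routes it to the true
// label before testing equality.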


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X87OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ Move(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&check);
  if (reg.is_byte_register()) {
    // setcc for byte registers (al, bl, cl, dl).
    __ setcc(cc, reg);
    __ movzx_b(reg, reg);
  } else {
    // Emit a branch to set a register to either 1 or 0.
    Label set;
    __ j(cc, &set, Label::kNear);
    __ Move(reg, Immediate(0));
    __ jmp(&done, Label::kNear);
    __ bind(&set);
    __ mov(reg, Immediate(1));
  }
  __ bind(&done);
}
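
// Note: setcc can only write the byte registers al, bl, cl and dl, so boolean
// materialization uses setcc + movzx_b when the allocated register has a
// low-byte encoding and otherwise falls back to the branch-and-move sequence.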


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  X87OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ cmp(input, Immediate(i.InputInt32(index + 0)));
    __ j(equal, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  X87OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (size_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ cmp(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ jmp(Operand::JumpTable(input, times_4, table));
}
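
// The jump table holds one 4-byte entry per case (hence the times_4 scale):
// after the bounds check against case_count, input value n dispatches through
// table[n], and out-of-range values jump to the default block (InputRpo(1)).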


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// The calling convention for JSFunctions on X87 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:

// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp

// --{ push arguments and setup ESI, EDI }--------------------------------------
// | args + receiver | caller frame |
// ^ esp ^ ebp
// [edi = JSFunction, esi = context]

// --{ call [edi + kCodeEntryOffset] }------------------------------------------
// | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ ebp,esp

// --{ push esi }---------------------------------------------------------------
// | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp

// --{ push edi }---------------------------------------------------------------
// | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ esp,ebp

// --{ pop ebp }-----------------------------------------------------------
// | | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// --{ ret #A+1 }-----------------------------------------------------------
// | | caller frame |
// ^ esp ^ ebp


// Runtime function calls are accomplished by doing a stub call to the
// CEntryStub (a real code object). On X87, arguments are passed on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.

// --{ before the call instruction }--------------------------------------------
//                                                         |  caller frame  |
//                                                         ^ esp           ^ ebp

// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
//                                       | args + receiver |  caller frame  |
//                                       ^ esp                             ^ ebp
//              [eax = #args, ebx = runtime function, esi = context]

// --{ call #CEntryStub }-------------------------------------------------------
//                                 | RET | args + receiver |  caller frame  |
//                                 ^ esp                                   ^ ebp

// =={ body of runtime function }===============================================

// --{ runtime returns }--------------------------------------------------------
//                                                         |  caller frame  |
//                                                         ^ esp           ^ ebp

// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.

// --{ before the call instruction }--------------------------------------------
//                                                         |  caller frame  |
//                                                         ^ esp           ^ ebp

// --{ set up arguments in registers and on the stack }--------------------------
//                                                  | args |  caller frame  |
//                                                  ^ esp                  ^ ebp
//                  [r0 = arg0, r1 = arg1, ...]

// --{ call code }--------------------------------------------------------------
//                                            | RET | args |  caller frame  |
//                                            ^ esp                        ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                                       | FP | RET | args |  caller frame  |
//                                       ^ esp                             ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                                       | FP | RET | args |  caller frame  |
//                                       ^ ebp,esp

// --{ save registers }---------------------------------------------------------
//                                | regs | FP | RET | args |  caller frame  |
//                                ^ esp  ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
//                 | callee frame | regs | FP | RET | args |  caller frame  |
//                 ^esp                  ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ restore registers }------------------------------------------------------
//                                | regs | FP | RET | args |  caller frame  |
//                                ^ esp  ^ ebp

// --{ mov esp, ebp }-----------------------------------------------------------
//                                       | FP | RET | args |  caller frame  |
//                                       ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
//                                              | RET | args |  caller frame  |
//                                              ^ esp                      ^ ebp


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    // Assemble a prologue similar to the cdecl calling convention.
    __ push(ebp);
    __ mov(ebp, esp);
  } else if (descriptor->IsJSFunctionCall()) {
    // TODO(turbofan): this prologue is redundant with OSR, but needed for
    // code aging.
    __ Prologue(this->info()->GeneratePreagedPrologue());
  } else if (frame()->needs_frame()) {
    __ StubPrologue();
  } else {
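    // No frame is needed; just account for the return address that the
    // caller's call instruction left on the stack.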
    frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
  }
  frame_access_state()->SetFrameAccessToDefault();

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (stack_shrink_slots > 0) {
    __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
  }

  if (saves != 0) {  // Save callee-saved registers.
    DCHECK(!info()->is_osr());
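    // Registers are pushed in descending code order; the ascending pops in
    // AssembleReturn restore them in the mirror-image order.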
    int pushed = 0;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ push(Register::from_code(i));
      ++pushed;
    }
    frame()->AllocateSavedCalleeRegisterSlots(pushed);
  }

  // Initialize FPU state.
  __ fninit();
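  // fninit empties the x87 register stack; fld1 then pushes 1.0 so the depth
  // matches the single-entry invariant that VerifyX87StackDepth(1) checks in
  // AssembleReturn.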
  __ fld1();
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Clear the FPU stack only if there is no return value on the x87 stack.
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ VerifyX87StackDepth(1);
  }
  bool clear_stack = true;
  for (int i = 0; i < descriptor->ReturnCount(); i++) {
    MachineRepresentation rep = descriptor->GetReturnType(i).representation();
    LinkageLocation loc = descriptor->GetReturnLocation(i);
    if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
      clear_stack = false;
      break;
    }
  }
  if (clear_stack) __ fstp(0);

  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  const RegList saves = descriptor->CalleeSavedRegisters();
  // Restore registers.
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ pop(Register::from_code(i));
    }
  }

  if (descriptor->IsCFunctionCall()) {
    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
    __ pop(ebp);       // Pop caller's frame pointer.
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
      __ pop(ebp);       // Pop caller's frame pointer.
    }
  }
  if (pop_count == 0) {
    __ ret(0);
  } else {
    __ Ret(pop_count * kPointerSize, ebx);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X87OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
      Operand dst = g.ToOperand(destination);
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      int offset;
      if (IsMaterializableFromFrame(src, &offset)) {
        if (destination->IsRegister()) {
          Register dst = g.ToRegister(destination);
          __ mov(dst, g.ToMaterializableOperand(offset));
        } else {
          DCHECK(destination->IsStackSlot());
          Operand dst = g.ToOperand(destination);
          __ push(g.ToMaterializableOperand(offset));
          __ pop(dst);
        }
      } else if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        DCHECK(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (src_constant.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
      if (destination->IsDoubleRegister()) {
        __ sub(esp, Immediate(kInt32Size));
        __ mov(MemOperand(esp, 0), Immediate(src));
        // Keep at most one value on the x87 stack: discard the old top first.
        __ fstp(0);
        __ fld_s(MemOperand(esp, 0));
        __ add(esp, Immediate(kInt32Size));
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ Move(dst, Immediate(src));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
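      // ia32 is little-endian, so the low word is stored at the lower address.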
      if (destination->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ mov(MemOperand(esp, 0), Immediate(lower));
        __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
        // Keep at most one value on the x87 stack: discard the old top first.
        __ fstp(0);
        __ fld_d(MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ Move(dst0, Immediate(lower));
        __ Move(dst1, Immediate(upper));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DCHECK(destination->IsDoubleStackSlot());
    Operand dst = g.ToOperand(destination);
    auto allocated = AllocatedOperand::cast(*source);
    switch (allocated.representation()) {
      case MachineRepresentation::kFloat32:
        __ fst_s(dst);
        break;
      case MachineRepresentation::kFloat64:
        __ fst_d(dst);
        break;
      default:
        UNREACHABLE();
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    auto allocated = AllocatedOperand::cast(*source);
    if (destination->IsDoubleRegister()) {
      // Keep at most one value on the x87 stack: discard the old top first.
      __ fstp(0);
      switch (allocated.representation()) {
        case MachineRepresentation::kFloat32:
          __ fld_s(src);
          break;
        case MachineRepresentation::kFloat64:
          __ fld_d(src);
          break;
        default:
          UNREACHABLE();
      }
    } else {
      Operand dst = g.ToOperand(destination);
      switch (allocated.representation()) {
        case MachineRepresentation::kFloat32:
          __ fld_s(src);
          __ fstp_s(dst);
          break;
        case MachineRepresentation::kFloat64:
          __ fld_d(src);
          __ fstp_d(dst);
          break;
        default:
          UNREACHABLE();
      }
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X87OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ xchg(dst, src);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    __ xchg(g.ToRegister(source), g.ToOperand(destination));
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
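    // Swap through the stack with push/pop. Operands are re-derived after each
    // push because pushing moves esp; IncreaseSPDelta keeps the frame access
    // state's esp tracking in sync so sp-relative slots still resolve.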
    Operand dst1 = g.ToOperand(destination);
    __ push(dst1);
    frame_access_state()->IncreaseSPDelta(1);
    Operand src1 = g.ToOperand(source);
    __ push(src1);
    Operand dst2 = g.ToOperand(destination);
    __ pop(dst2);
    frame_access_state()->IncreaseSPDelta(-1);
    Operand src2 = g.ToOperand(source);
    __ pop(src2);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    UNREACHABLE();
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    auto allocated = AllocatedOperand::cast(*source);
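    // The register value lives in st(0). Load the memory operand on top of it,
    // exchange st(0) and st(1), then store the old register value to memory.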
    switch (allocated.representation()) {
      case MachineRepresentation::kFloat32:
        __ fld_s(g.ToOperand(destination));
        __ fxch();
        __ fstp_s(g.ToOperand(destination));
        break;
      case MachineRepresentation::kFloat64:
        __ fld_d(g.ToOperand(destination));
        __ fxch();
        __ fstp_d(g.ToOperand(destination));
        break;
      default:
        UNREACHABLE();
    }
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    auto allocated = AllocatedOperand::cast(*source);
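    // Load source then destination onto the x87 stack and store them back
    // crosswise: st(0) (the old destination value) goes to the source slot,
    // then st(1) (the old source value) to the destination slot.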
    switch (allocated.representation()) {
      case MachineRepresentation::kFloat32:
        __ fld_s(g.ToOperand(source));
        __ fld_s(g.ToOperand(destination));
        __ fstp_s(g.ToOperand(source));
        __ fstp_s(g.ToOperand(destination));
        break;
      case MachineRepresentation::kFloat64:
        __ fld_d(g.ToOperand(source));
        __ fld_d(g.ToOperand(destination));
        __ fstp_d(g.ToOperand(source));
        __ fstp_d(g.ToOperand(destination));
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
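  // Each entry is a 32-bit absolute label address; AssembleArchTableSwitch
  // above dispatches through this table with a scaled indexed jmp.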
  for (size_t index = 0; index < target_count; ++index) {
    __ dd(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
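  // (For example, with a hypothetical 5-byte patch size, a site only 2 bytes
  // past the previous lazy-deopt point would get 3 bytes of nops.)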
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8