// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frames.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds IA-32 specific methods for decoding operands.
class IA32OperandConverter : public InstructionOperandConverter {
 public:
  IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      return Operand(ToRegister(op));
    } else if (op->IsDoubleRegister()) {
      DCHECK(extra == 0);
      return Operand(ToDoubleRegister(op));
    }
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
  }

  Operand SlotToOperand(int slot, int extra = 0) {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return Operand(offset.from_stack_pointer() ? esp : ebp,
                   offset.offset() + extra);
  }

  Operand HighOperand(InstructionOperand* op) {
    DCHECK(op->IsDoubleStackSlot());
    return ToOperand(op, kPointerSize);
  }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kFloat32:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Immediate(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Immediate(constant.ToHeapObject());
      case Constant::kInt64:
        break;
      case Constant::kRpoNumber:
        return Immediate::CodeRelativeOffset(ToLabel(operand));
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

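  // Illustrative note: ScaleFor(kMode_MR1, kMode_MR4) yields times_4; this
  // assumes the scaled addressing modes (MR1..MR8, M1..M8) are declared
  // consecutively, which the arithmetic below relies on.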
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_MI: {
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(Immediate(disp));
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};
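
// Illustrative usage sketch (mirroring the kIA32Add case in
// AssembleArchInstruction below): the converter turns an instruction's
// virtual inputs and outputs into assembler-ready operands, e.g.
//
//   IA32OperandConverter i(this, instr);
//   if (HasImmediateInput(instr, 1)) {
//     __ add(i.InputOperand(0), i.InputImmediate(1));
//   } else {
//     __ add(i.InputRegister(0), i.InputOperand(1));
//   }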


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xor_(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadFloat final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ sub(esp, Immediate(kDoubleSize));
    __ movsd(MemOperand(esp, 0), input_);
    __ SlowTruncateToI(result_, esp, 0);
    __ add(esp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ lea(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};

}  // namespace


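// The checked load/store macros below bound-check the offset in input 0
// against the length in input 1 (unsigned comparison). An out-of-bounds load
// falls back to the OutOfLineLoad* code above, producing 0 for integer loads
// and an all-ones (NaN) bit pattern for float loads; an out-of-bounds store
// is simply skipped.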
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
  do {                                                                  \
    auto result = i.OutputDoubleRegister();                             \
    auto offset = i.InputRegister(0);                                   \
    if (instr->InputAt(1)->IsRegister()) {                              \
      __ cmp(offset, i.InputRegister(1));                               \
    } else {                                                            \
      __ cmp(offset, i.InputImmediate(1));                              \
    }                                                                   \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
    __ j(above_equal, ool->entry());                                    \
    __ asm_instr(result, i.MemoryOperand(2));                           \
    __ bind(ool->exit());                                               \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
  do {                                                                    \
    auto result = i.OutputRegister();                                     \
    auto offset = i.InputRegister(0);                                     \
    if (instr->InputAt(1)->IsRegister()) {                                \
      __ cmp(offset, i.InputRegister(1));                                 \
    } else {                                                              \
      __ cmp(offset, i.InputImmediate(1));                                \
    }                                                                     \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry());                                      \
    __ asm_instr(result, i.MemoryOperand(2));                             \
    __ bind(ool->exit());                                                 \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
  do {                                                          \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    Label done;                                                 \
    __ j(above_equal, &done, Label::kNear);                     \
    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
    __ bind(&done);                                             \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
  do {                                                       \
    auto offset = i.InputRegister(0);                        \
    if (instr->InputAt(1)->IsRegister()) {                   \
      __ cmp(offset, i.InputRegister(1));                    \
    } else {                                                 \
      __ cmp(offset, i.InputImmediate(1));                   \
    }                                                        \
    Label done;                                              \
    __ j(above_equal, &done, Label::kNear);                  \
    if (instr->InputAt(2)->IsRegister()) {                   \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
    } else {                                                 \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    }                                                        \
    __ bind(&done);                                          \
  } while (false)

#define ASSEMBLE_COMPARE(asm_instr)                                   \
  do {                                                                \
    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
      size_t index = 0;                                               \
      Operand left = i.MemoryOperand(&index);                         \
      if (HasImmediateInput(instr, index)) {                          \
        __ asm_instr(left, i.InputImmediate(index));                  \
      } else {                                                        \
        __ asm_instr(left, i.InputRegister(index));                   \
      }                                                               \
    } else {                                                          \
      if (HasImmediateInput(instr, 1)) {                              \
        if (instr->InputAt(0)->IsRegister()) {                        \
          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
        } else {                                                      \
          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
        }                                                             \
      } else {                                                        \
        if (instr->InputAt(1)->IsRegister()) {                        \
          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
        } else {                                                      \
          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
        }                                                             \
      }                                                               \
    }                                                                 \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(esp, ebp);
  __ pop(ebp);
}

void CodeGenerator::AssembleSetupStackPointer() {}

void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ add(esp, Immediate(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame_access_state()->has_frame()) {
    __ mov(ebp, MemOperand(ebp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register, Register,
                                                     Register) {
  // There are not enough temp registers left on ia32 for a call instruction,
  // so we pick some scratch registers and save/restore them manually here.
  int scratch_count = 3;
  Register scratch1 = ebx;
  Register scratch2 = ecx;
  Register scratch3 = edx;
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if the current frame is an arguments adaptor frame.
  __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &done, Label::kNear);

  __ push(scratch1);
  __ push(scratch2);
  __ push(scratch3);

  // Load the arguments count from the current arguments adaptor frame (note
  // that it does not include the receiver).
  Register caller_args_count_reg = scratch1;
  __ mov(caller_args_count_reg,
         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3, ReturnAddressState::kOnStack, scratch_count);
  __ pop(scratch3);
  __ pop(scratch2);
  __ pop(scratch1);

  __ bind(&done);
}

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  IA32OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         no_reg, no_reg, no_reg);
      }
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction:
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         no_reg, no_reg, no_reg);
      }
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), esp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), ebp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ mov(i.OutputRegister(), Operand(ebp, 0));
      } else {
        __ mov(i.OutputRegister(), ebp);
      }
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2si(result, Operand(input));
      __ cmp(result, 1);
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
      __ mov(operand, value);
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = esp;
      } else {
        base = ebp;
      }
      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
      break;
    }
    case kIA32Add:
      if (HasImmediateInput(instr, 1)) {
        __ add(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ add(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32And:
      if (HasImmediateInput(instr, 1)) {
        __ and_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ and_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Cmp:
      ASSEMBLE_COMPARE(cmp);
      break;
    case kIA32Cmp16:
      ASSEMBLE_COMPARE(cmpw);
      break;
    case kIA32Cmp8:
      ASSEMBLE_COMPARE(cmpb);
      break;
    case kIA32Test:
      ASSEMBLE_COMPARE(test);
      break;
    case kIA32Test16:
      ASSEMBLE_COMPARE(test_w);
      break;
    case kIA32Test8:
      ASSEMBLE_COMPARE(test_b);
      break;
    case kIA32Imul:
      if (HasImmediateInput(instr, 1)) {
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
      } else {
        __ imul(i.OutputRegister(), i.InputOperand(1));
      }
      break;
    case kIA32ImulHigh:
      __ imul(i.InputRegister(1));
      break;
    case kIA32UmulHigh:
      __ mul(i.InputRegister(1));
      break;
    case kIA32Idiv:
      __ cdq();
      __ idiv(i.InputOperand(1));
      break;
    case kIA32Udiv:
      __ Move(edx, Immediate(0));
      __ div(i.InputOperand(1));
      break;
    case kIA32Not:
      __ not_(i.OutputOperand());
      break;
    case kIA32Neg:
      __ neg(i.OutputOperand());
      break;
    case kIA32Or:
      if (HasImmediateInput(instr, 1)) {
        __ or_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ or_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Xor:
      if (HasImmediateInput(instr, 1)) {
        __ xor_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ xor_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Sub:
      if (HasImmediateInput(instr, 1)) {
        __ sub(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ sub(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Shl:
      if (HasImmediateInput(instr, 1)) {
        __ shl(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shl_cl(i.OutputOperand());
      }
      break;
    case kIA32Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputOperand());
      }
      break;
    case kIA32Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputOperand());
      }
      break;
    case kIA32AddPair: {
      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      bool use_temp = false;
      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
        // We cannot write to the output register directly, because it would
        // overwrite an input for adc. We have to use the temp register.
        use_temp = true;
        __ Move(i.TempRegister(0), i.InputRegister(0));
        __ add(i.TempRegister(0), i.InputRegister(2));
      } else {
        __ add(i.OutputRegister(0), i.InputRegister(2));
      }
      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
        __ Move(i.OutputRegister(1), i.InputRegister(1));
      }
      if (use_temp) {
        __ Move(i.OutputRegister(0), i.TempRegister(0));
      }
      break;
    }
    case kIA32SubPair: {
      // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      bool use_temp = false;
      if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
          i.OutputRegister(0).code() == i.InputRegister(3).code()) {
        // We cannot write to the output register directly, because it would
        // overwrite an input for sbb. We have to use the temp register.
        use_temp = true;
        __ Move(i.TempRegister(0), i.InputRegister(0));
        __ sub(i.TempRegister(0), i.InputRegister(2));
      } else {
        __ sub(i.OutputRegister(0), i.InputRegister(2));
      }
      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
      if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
        __ Move(i.OutputRegister(1), i.InputRegister(1));
      }
      if (use_temp) {
        __ Move(i.OutputRegister(0), i.TempRegister(0));
      }
      break;
    }
    case kIA32MulPair: {
      // Accumulate the two cross products in the high output word, then
      // multiply the low words and add the carry-over from their high half.
      __ imul(i.OutputRegister(1), i.InputOperand(0));
      __ mov(i.TempRegister(0), i.InputOperand(1));
      __ imul(i.TempRegister(0), i.InputOperand(2));
      __ add(i.OutputRegister(1), i.TempRegister(0));
      __ mov(i.OutputRegister(0), i.InputOperand(0));
      // Multiplies the low words and stores them in eax and edx.
      __ mul(i.InputRegister(2));
      __ add(i.OutputRegister(1), i.TempRegister(0));
      break;
    }
    case kIA32ShlPair:
      if (HasImmediateInput(instr, 2)) {
        __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
      } else {
        // Shift has been loaded into CL by the register allocator.
        __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
      }
      break;
    case kIA32ShrPair:
      if (HasImmediateInput(instr, 2)) {
        __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
      } else {
        // Shift has been loaded into CL by the register allocator.
        __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
      }
      break;
    case kIA32SarPair:
      if (HasImmediateInput(instr, 2)) {
        __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
      } else {
        // Shift has been loaded into CL by the register allocator.
        __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
      }
      break;
    case kIA32Ror:
      if (HasImmediateInput(instr, 1)) {
        __ ror(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ ror_cl(i.OutputOperand());
      }
      break;
    case kIA32Lzcnt:
      __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kIA32Tzcnt:
      __ Tzcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kIA32Popcnt:
      __ Popcnt(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32Cmp:
      __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Add:
      __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Sub:
      __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Mul:
      __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Div:
      __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Max:
      __ maxss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Min:
      __ minss(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat32Sqrt:
      __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64Cmp:
      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Max:
      __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Min:
      __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Mod: {
      // TODO(dcarney): alignment is wrong.
      __ sub(esp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(esp, 0));
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(esp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use eax.
      __ fnstsw_ax();
      __ sahf();
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(esp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat32ToFloat64:
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToFloat32:
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32ToInt32:
      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat32ToUint32: {
      Label success;
      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
      __ test(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
      __ Move(kScratchDoubleReg, static_cast<float>(INT32_MIN));
      __ addss(kScratchDoubleReg, i.InputOperand(0));
      __ cvttss2si(i.OutputRegister(), kScratchDoubleReg);
      __ or_(i.OutputRegister(), Immediate(0x80000000));
      __ bind(&success);
      break;
    }
    case kSSEFloat64ToInt32:
      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToUint32: {
      __ Move(kScratchDoubleReg, -2147483648.0);
      __ addsd(kScratchDoubleReg, i.InputOperand(0));
      __ cvttsd2si(i.OutputRegister(), kScratchDoubleReg);
      __ add(i.OutputRegister(), Immediate(0x80000000));
      break;
    }
    case kSSEInt32ToFloat32:
      __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEUint32ToFloat32: {
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      __ mov(scratch0, i.InputOperand(0));
      __ Cvtui2ss(i.OutputDoubleRegister(), scratch0, scratch1);
      break;
    }
    case kSSEInt32ToFloat64:
      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEUint32ToFloat64:
      __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ExtractLowWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kSSEFloat64ExtractHighWord32:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
      } else {
        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
      }
      break;
    case kSSEFloat64InsertLowWord32:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
      break;
    case kSSEFloat64InsertHighWord32:
      __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
      break;
    case kSSEFloat64LoadLowWord32:
      __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kAVXFloat32Add: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Sub: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Mul: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Div: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    }
    case kAVXFloat32Max: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmaxss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Min: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vminss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Add: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Sub: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Mul: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Div: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    }
    case kAVXFloat64Max: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Min: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kAVXFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kAVXFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kAVXFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
      break;
    }
    case kIA32Movsxbl:
      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movzxbl:
      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movb: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_b(operand, i.InputInt8(index));
      } else {
        __ mov_b(operand, i.InputRegister(index));
      }
      break;
    }
    case kIA32Movsxwl:
      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movzxwl:
      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movw: {
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_w(operand, i.InputInt16(index));
      } else {
        __ mov_w(operand, i.InputRegister(index));
      }
      break;
    }
    case kIA32Movl:
      if (instr->HasOutput()) {
        __ mov(i.OutputRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ mov(operand, i.InputImmediate(index));
        } else {
          __ mov(operand, i.InputRegister(index));
        }
      }
      break;
    case kIA32Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kIA32Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        size_t index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kIA32BitcastFI:
      if (instr->InputAt(0)->IsDoubleStackSlot()) {
        __ mov(i.OutputRegister(), i.InputOperand(0));
      } else {
        __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
      }
      break;
    case kIA32BitcastIF:
      if (instr->InputAt(0)->IsRegister()) {
        __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
      } else {
        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
      }
      break;
    case kIA32Lea: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happen to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (mode == kMode_MI) {
        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
      } else if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ add(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ sub(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shl(i.OutputRegister(), 1);
          } else {
            __ lea(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shl(i.OutputRegister(), 1);
        } else if (mode == kMode_M4) {
          __ shl(i.OutputRegister(), 2);
        } else if (mode == kMode_M8) {
          __ shl(i.OutputRegister(), 3);
        } else {
          __ lea(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ lea(i.OutputRegister(), i.MemoryOperand());
      }
      break;
    }
    case kIA32PushFloat32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (HasImmediateInput(instr, 0)) {
        __ Move(kScratchDoubleReg, i.InputDouble(0));
        __ sub(esp, Immediate(kDoubleSize));
        __ movss(Operand(esp, 0), kScratchDoubleReg);
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ movsd(kScratchDoubleReg, i.InputOperand(0));
        __ sub(esp, Immediate(kDoubleSize));
        __ movss(Operand(esp, 0), kScratchDoubleReg);
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      }
      break;
    case kIA32PushFloat64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (HasImmediateInput(instr, 0)) {
        __ Move(kScratchDoubleReg, i.InputDouble(0));
        __ sub(esp, Immediate(kDoubleSize));
        __ movsd(Operand(esp, 0), kScratchDoubleReg);
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ movsd(kScratchDoubleReg, i.InputOperand(0));
        __ sub(esp, Immediate(kDoubleSize));
        __ movsd(Operand(esp, 0), kScratchDoubleReg);
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      }
      break;
    case kIA32Push:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ sub(esp, Immediate(kDoubleSize));
        __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else if (HasImmediateInput(instr, 0)) {
        __ push(i.InputImmediate(0));
        frame_access_state()->IncreaseSPDelta(1);
      } else {
        __ push(i.InputOperand(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kIA32Poke: {
      int const slot = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
      } else {
        __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
      }
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
    case kIA32StackCheck: {
      ExternalReference const stack_limit =
          ExternalReference::address_of_stack_limit(isolate());
      __ cmp(esp, Operand::StaticVariable(stack_limit));
      break;
    }
    case kCheckedLoadWord64:
    case kCheckedStoreWord64:
      UNREACHABLE();  // currently unsupported checked int64 load/store.
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  IA32OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
      // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
      // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
    default:
      UNREACHABLE();
      break;
  }
  // Add a jump if not falling through to the next block.
  if (!branch->fallthru) __ jmp(flabel);
}
1375
1376
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001377void CodeGenerator::AssembleArchJump(RpoNumber target) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001378 if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001379}
1380
1381
1382// Assembles boolean materializations after an instruction.
1383void CodeGenerator::AssembleArchBoolean(Instruction* instr,
1384 FlagsCondition condition) {
1385 IA32OperandConverter i(this, instr);
1386 Label done;
1387
1388 // Materialize a full 32-bit 1 or 0 value. The result register is always the
1389 // last output of the instruction.
1390 Label check;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001391 DCHECK_NE(0u, instr->OutputCount());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001392 Register reg = i.OutputRegister(instr->OutputCount() - 1);
1393 Condition cc = no_condition;
1394 switch (condition) {
1395 case kUnorderedEqual:
1396 __ j(parity_odd, &check, Label::kNear);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001397 __ Move(reg, Immediate(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001398 __ jmp(&done, Label::kNear);
1399 // Fall through.
1400 case kEqual:
1401 cc = equal;
1402 break;
1403 case kUnorderedNotEqual:
1404 __ j(parity_odd, &check, Label::kNear);
1405 __ mov(reg, Immediate(1));
1406 __ jmp(&done, Label::kNear);
1407 // Fall through.
1408 case kNotEqual:
1409 cc = not_equal;
1410 break;
1411 case kSignedLessThan:
1412 cc = less;
1413 break;
1414 case kSignedGreaterThanOrEqual:
1415 cc = greater_equal;
1416 break;
1417 case kSignedLessThanOrEqual:
1418 cc = less_equal;
1419 break;
1420 case kSignedGreaterThan:
1421 cc = greater;
1422 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001423 case kUnsignedLessThan:
1424 cc = below;
1425 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001426 case kUnsignedGreaterThanOrEqual:
1427 cc = above_equal;
1428 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001429 case kUnsignedLessThanOrEqual:
1430 cc = below_equal;
1431 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001432 case kUnsignedGreaterThan:
1433 cc = above;
1434 break;
1435 case kOverflow:
1436 cc = overflow;
1437 break;
1438 case kNotOverflow:
1439 cc = no_overflow;
1440 break;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001441 default:
1442 UNREACHABLE();
1443 break;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001444 }
1445 __ bind(&check);
1446 if (reg.is_byte_register()) {
1447 // setcc for byte registers (al, bl, cl, dl).
1448 __ setcc(cc, reg);
1449 __ movzx_b(reg, reg);
1450 } else {
1451 // Emit a branch to set a register to either 1 or 0.
1452 Label set;
1453 __ j(cc, &set, Label::kNear);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001454 __ Move(reg, Immediate(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001455 __ jmp(&done, Label::kNear);
1456 __ bind(&set);
1457 __ mov(reg, Immediate(1));
1458 }
1459 __ bind(&done);
1460}
1461
1462
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001463void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
1464 IA32OperandConverter i(this, instr);
1465 Register input = i.InputRegister(0);
1466 for (size_t index = 2; index < instr->InputCount(); index += 2) {
1467 __ cmp(input, Immediate(i.InputInt32(index + 0)));
1468 __ j(equal, GetLabel(i.InputRpo(index + 1)));
1469 }
1470 AssembleArchJump(i.InputRpo(1));
1471}
1472
1473
1474void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
1475 IA32OperandConverter i(this, instr);
1476 Register input = i.InputRegister(0);
1477 size_t const case_count = instr->InputCount() - 2;
1478 Label** cases = zone()->NewArray<Label*>(case_count);
1479 for (size_t index = 0; index < case_count; ++index) {
1480 cases[index] = GetLabel(i.InputRpo(index + 2));
1481 }
1482 Label* const table = AddJumpTable(cases, case_count);
  __ cmp(input, Immediate(case_count));
  __ j(above_equal, GetLabel(i.InputRpo(1)));
  __ jmp(Operand::JumpTable(input, times_4, table));
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp           ^ ebp

// --{ push arguments and setup ESI, EDI }--------------------------------------
//                                       | args + receiver | caller frame |
//                                       ^ esp                             ^ ebp
//                [edi = JSFunction, esi = context]

// --{ call [edi + kCodeEntryOffset] }------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                   ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ esp                                        ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ ebp,esp

// --{ push esi }---------------------------------------------------------------
//                      | CTX | FP | RET | args + receiver | caller frame |
//                      ^esp  ^ ebp

// --{ push edi }---------------------------------------------------------------
//                | FNC | CTX | FP | RET | args + receiver | caller frame |
//                ^esp        ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp                       ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
//                            | FP | RET | args + receiver | caller frame |
//                            ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
//                            |    | RET | args + receiver | caller frame |
//                                 ^ esp                                   ^ ebp

// --{ ret #A+1 }---------------------------------------------------------------
//                                 |                       | caller frame |
//                                                         ^ esp           ^ ebp


1551// Runtime function calls are accomplished by doing a stub call to the
1552// CEntryStub (a real code object). On IA32 passes arguments on the
1553// stack, the number of arguments in EAX, the address of the runtime function
1554// in EBX, and the context in ESI.

// --{ before the call instruction }--------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp           ^ ebp

// --{ push arguments and setup EAX, EBX, and ESI }------------------------------
//                                       | args + receiver | caller frame |
//                                       ^ esp                             ^ ebp
//                [eax = #args, ebx = runtime function, esi = context]

// --{ call #CEntryStub }--------------------------------------------------------
//                                 | RET | args + receiver | caller frame |
//                                 ^ esp                                   ^ ebp

// =={ body of runtime function }===============================================

// --{ runtime returns }--------------------------------------------------------
//                                                         | caller frame |
//                                                         ^ esp           ^ ebp

// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.

// --{ before the call instruction }--------------------------------------------
//                                         | caller frame |
//                                         ^ esp           ^ ebp

// --{ set up arguments in registers and on the stack }--------------------------
//                                  | args | caller frame |
//                                  ^ esp                 ^ ebp
//                [r0 = arg0, r1 = arg1, ...]

// --{ call code }---------------------------------------------------------------
//                            | RET | args | caller frame |
//                            ^ esp                       ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }----------------------------------------------------------------
//                       | FP | RET | args | caller frame |
//                       ^ esp                            ^ ebp

// --{ mov ebp, esp }------------------------------------------------------------
//                       | FP | RET | args | caller frame |
//                       ^ ebp,esp

// --{ save registers }----------------------------------------------------------
//                | regs | FP | RET | args | caller frame |
//                ^ esp  ^ ebp

// --{ subi esp, #N }------------------------------------------------------------
// | callee frame | regs | FP | RET | args | caller frame |
// ^esp                  ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ restore registers }-------------------------------------------------------
//                | regs | FP | RET | args | caller frame |
//                ^ esp  ^ ebp

// --{ mov esp, ebp }------------------------------------------------------------
//                       | FP | RET | args | caller frame |
//                       ^ esp,ebp

// --{ pop ebp }-----------------------------------------------------------------
//                            | RET | args | caller frame |
//                            ^ esp                       ^ ebp


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ push(ebp);
      __ mov(ebp, esp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue());
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
    }
  }
  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (stack_shrink_slots > 0) {
    __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
  }

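  // Callee-saved registers are pushed from the highest register code down to
  // the lowest so that AssembleReturn can pop them back in ascending order.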
  if (saves != 0) {  // Save callee-saved registers.
    DCHECK(!info()->is_osr());
    int pushed = 0;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      __ push(Register::from_code(i));
      ++pushed;
    }
    frame()->AllocateSavedCalleeRegisterSlots(pushed);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves = descriptor->CalleeSavedRegisters();
  // Restore registers.
  if (saves != 0) {
    for (int i = 0; i < Register::kNumRegisters; i++) {
      if (!((1 << i) & saves)) continue;
      __ pop(Register::from_code(i));
    }
  }

  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ jmp(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      AssembleDeconstructFrame();
    }
  }
  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
  // Might need ecx for scratch if pop_size is too big.
  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
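  // Ret(bytes, scratch) may pop the return address into the scratch register
  // and adjust esp manually if 'bytes' does not fit a plain 'ret imm16'.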
  __ Ret(static_cast<int>(pop_size), ecx);
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
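      // Copy the 32-bit word through the stack so that no general-purpose
      // register has to be clobbered as scratch.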
      Operand dst = g.ToOperand(destination);
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      int slot;
      if (IsMaterializableFromFrame(src, &slot)) {
        if (destination->IsRegister()) {
          Register dst = g.ToRegister(destination);
          __ mov(dst, g.SlotToOperand(slot));
        } else {
          DCHECK(destination->IsStackSlot());
          Operand dst = g.ToOperand(destination);
          __ push(g.SlotToOperand(slot));
          __ pop(dst);
        }
      } else if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        DCHECK(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (src_constant.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ Move(dst, Immediate(src));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
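        // There is no 64-bit immediate store on ia32, so write the constant as
        // two separate 32-bit halves.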
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ Move(dst0, Immediate(lower));
        __ Move(dst1, Immediate(upper));
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      Operand dst = g.ToOperand(destination);
      __ movsd(kScratchDoubleReg, src);
      __ movsd(dst, kScratchDoubleReg);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
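    // Swap through the stack so that no scratch general-purpose register is
    // needed.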
    __ push(src);
    __ mov(src, dst);
    __ pop(dst);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    Register src = g.ToRegister(source);
    __ push(src);
    frame_access_state()->IncreaseSPDelta(1);
    Operand dst = g.ToOperand(destination);
    __ mov(src, dst);
    frame_access_state()->IncreaseSPDelta(-1);
    dst = g.ToOperand(destination);
    __ pop(dst);
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
    Operand dst1 = g.ToOperand(destination);
    __ push(dst1);
    frame_access_state()->IncreaseSPDelta(1);
    Operand src1 = g.ToOperand(source);
    __ push(src1);
    Operand dst2 = g.ToOperand(destination);
    __ pop(dst2);
    frame_access_state()->IncreaseSPDelta(-1);
    Operand src2 = g.ToOperand(source);
    __ pop(src2);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(kScratchDoubleReg, src);
    __ movaps(src, dst);
    __ movaps(dst, kScratchDoubleReg);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.
    XMMRegister reg = g.ToDoubleRegister(source);
    Operand other = g.ToOperand(destination);
    __ movsd(kScratchDoubleReg, other);
    __ movsd(other, reg);
    __ movaps(reg, kScratchDoubleReg);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.
    Operand src0 = g.ToOperand(source);
    Operand src1 = g.HighOperand(source);
    Operand dst0 = g.ToOperand(destination);
    Operand dst1 = g.HighOperand(destination);
    __ movsd(kScratchDoubleReg, dst0);  // Save destination in scratch register.
    __ push(src0);  // Then use stack to copy source to destination.
    __ pop(dst0);
    __ push(src1);
    __ pop(dst1);
    __ movsd(src0, kScratchDoubleReg);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
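  // Each entry is a 32-bit label address; AssembleArchTableSwitch indexes into
  // this table with 'jmp [table + input * 4]'.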
  for (size_t index = 0; index < target_count; ++index) {
    __ dd(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
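  // For example, if only 3 bytes have been emitted since the last lazy bailout
  // point but 5 bytes of patch space are required, 2 bytes of nops are emitted.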
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8