// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds IA-32 specific methods for decoding operands.
class IA32OperandConverter : public InstructionOperandConverter {
 public:
  IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      return Operand(ToRegister(op));
    } else if (op->IsDoubleRegister()) {
      DCHECK(extra == 0);
      return Operand(ToDoubleRegister(op));
    }
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
  }

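  // Returns the operand addressing the upper 32 bits (the high word) of a
  // 64-bit stack slot.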
  Operand HighOperand(InstructionOperand* op) {
    DCHECK(op->IsDoubleStackSlot());
    return ToOperand(op, kPointerSize);
  }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
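      // Float constants are materialized through the factory as number
      // objects, so the Immediate carries a handle rather than raw bits.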
      case Constant::kFloat32:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Immediate(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Immediate(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Immediate(constant.ToHeapObject());
      case Constant::kInt64:
        break;
      case Constant::kRpoNumber:
        return Immediate::CodeRelativeOffset(ToLabel(operand));
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  static int NextOffset(int* offset) {
    int i = *offset;
    (*offset)++;
    return i;
  }

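  // Relies on the scaled addressing modes (e.g. kMode_MR1..kMode_MR8) being
  // declared consecutively, so the distance from the corresponding times_1
  // mode yields the ScaleFactor directly, e.g.
  // ScaleFor(kMode_MR1, kMode_MR4) == times_4.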
  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  Operand MemoryOperand(int* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1:
      case kMode_M2:
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_MI: {
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(Immediate(disp));
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(int first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

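  // An out-of-bounds checked integer load yields zero; xor is the shortest
  // way to clear the result register.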
  void Generate() FINAL { __ xor_(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadFloat FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

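  // pcmpeqd of a register with itself sets every bit; interpreted as a
  // double that bit pattern is a NaN, which is what an out-of-bounds checked
  // float load produces.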
  void Generate() FINAL { __ pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() FINAL {
    __ sub(esp, Immediate(kDoubleSize));
    __ movsd(MemOperand(esp, 0), input_);
    __ SlowTruncateToI(result_, esp, 0);
    __ add(esp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};

}  // namespace

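// The checked load/store macros compare the offset (input 0) against the
// length (input 1, register or immediate). Out-of-bounds loads branch to an
// out-of-line stub that materializes the default value; out-of-bounds stores
// simply skip the memory access.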
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
  do {                                                                  \
    auto result = i.OutputDoubleRegister();                             \
    auto offset = i.InputRegister(0);                                   \
    if (instr->InputAt(1)->IsRegister()) {                              \
      __ cmp(offset, i.InputRegister(1));                               \
    } else {                                                            \
      __ cmp(offset, i.InputImmediate(1));                              \
    }                                                                   \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
    __ j(above_equal, ool->entry());                                    \
    __ asm_instr(result, i.MemoryOperand(2));                           \
    __ bind(ool->exit());                                               \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
  do {                                                                    \
    auto result = i.OutputRegister();                                     \
    auto offset = i.InputRegister(0);                                     \
    if (instr->InputAt(1)->IsRegister()) {                                \
      __ cmp(offset, i.InputRegister(1));                                 \
    } else {                                                              \
      __ cmp(offset, i.InputImmediate(1));                                \
    }                                                                     \
    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ j(above_equal, ool->entry());                                      \
    __ asm_instr(result, i.MemoryOperand(2));                             \
    __ bind(ool->exit());                                                 \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
  do {                                                          \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    Label done;                                                 \
    __ j(above_equal, &done, Label::kNear);                     \
    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
    __ bind(&done);                                             \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
  do {                                                       \
    auto offset = i.InputRegister(0);                        \
    if (instr->InputAt(1)->IsRegister()) {                   \
      __ cmp(offset, i.InputRegister(1));                    \
    } else {                                                 \
      __ cmp(offset, i.InputImmediate(1));                   \
    }                                                        \
    Label done;                                              \
    __ j(above_equal, &done, Label::kNear);                  \
    if (instr->InputAt(2)->IsRegister()) {                   \
      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
    } else {                                                 \
      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
    }                                                        \
    __ bind(&done);                                          \
  } while (false)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  IA32OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), esp);
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ cvttsd2si(result, Operand(input));
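      // cvttsd2si yields 0x80000000 (INT32_MIN) when the input is NaN or out
      // of int32 range. "cmp result, 1" sets the overflow flag for exactly
      // that value, so the jump below routes such inputs to the slow path.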
      __ cmp(result, 1);
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kIA32Add:
      if (HasImmediateInput(instr, 1)) {
        __ add(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ add(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32And:
      if (HasImmediateInput(instr, 1)) {
        __ and_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ and_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Cmp:
      if (HasImmediateInput(instr, 1)) {
        __ cmp(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ cmp(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Test:
      if (HasImmediateInput(instr, 1)) {
        __ test(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ test(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Imul:
      if (HasImmediateInput(instr, 1)) {
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
      } else {
        __ imul(i.OutputRegister(), i.InputOperand(1));
      }
      break;
    case kIA32ImulHigh:
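      // The one-operand forms of imul/mul multiply eax by the operand and
      // leave the high 32 bits of the product in edx.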
      __ imul(i.InputRegister(1));
      break;
    case kIA32UmulHigh:
      __ mul(i.InputRegister(1));
      break;
    case kIA32Idiv:
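      // idiv divides the 64-bit value in edx:eax, so sign-extend eax into
      // edx with cdq first.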
      __ cdq();
      __ idiv(i.InputOperand(1));
      break;
    case kIA32Udiv:
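      // div also divides edx:eax; clear edx for an unsigned 32-bit dividend.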
      __ Move(edx, Immediate(0));
      __ div(i.InputOperand(1));
      break;
    case kIA32Not:
      __ not_(i.OutputOperand());
      break;
    case kIA32Neg:
      __ neg(i.OutputOperand());
      break;
    case kIA32Or:
      if (HasImmediateInput(instr, 1)) {
        __ or_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ or_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Xor:
      if (HasImmediateInput(instr, 1)) {
        __ xor_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ xor_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Sub:
      if (HasImmediateInput(instr, 1)) {
        __ sub(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ sub(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Shl:
      if (HasImmediateInput(instr, 1)) {
        __ shl(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shl_cl(i.OutputOperand());
      }
      break;
    case kIA32Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputOperand());
      }
      break;
    case kIA32Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputOperand());
      }
      break;
    case kIA32Ror:
      if (HasImmediateInput(instr, 1)) {
        __ ror(i.OutputOperand(), i.InputInt5(1));
      } else {
        __ ror_cl(i.OutputOperand());
      }
      break;
    case kSSEFloat64Cmp:
      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Mod: {
      // TODO(dcarney): alignment is wrong.
      __ sub(esp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(esp, 0));
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(esp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following two instructions implicitly use eax.
      __ fnstsw_ax();
      __ sahf();
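      // fprem sets the C2 status bit while the reduction is incomplete;
      // after fnstsw/sahf, C2 lands in the parity flag, so loop on it.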
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(esp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt:
      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64Floor: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundDown);
      break;
    }
    case kSSEFloat64Ceil: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundUp);
      break;
    }
    case kSSEFloat64RoundTruncate: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                 v8::internal::Assembler::kRoundToZero);
      break;
    }
    case kSSECvtss2sd:
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSECvtsd2ss:
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToInt32:
      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToUint32: {
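      // SSE2 has no double-to-uint32 truncation. Bias the input by -2^31,
      // truncate to int32, then add 2^31 back: inputs in [0, 2^32) map onto
      // [-2^31, 2^31), where cvttsd2si works.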
      XMMRegister scratch = xmm0;
      __ Move(scratch, -2147483648.0);
      __ addsd(scratch, i.InputOperand(0));
      __ cvttsd2si(i.OutputRegister(), scratch);
      __ add(i.OutputRegister(), Immediate(0x80000000));
      break;
    }
    case kSSEInt32ToFloat64:
      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEUint32ToFloat64:
      __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kAVXFloat64Add: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Sub: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Mul: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kAVXFloat64Div: {
      CpuFeatureScope avx_scope(masm(), AVX);
      __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputOperand(1));
      break;
    }
    case kIA32Movsxbl:
      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movzxbl:
      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_b(operand, i.InputInt8(index));
      } else {
        __ mov_b(operand, i.InputRegister(index));
      }
      break;
    }
    case kIA32Movsxwl:
      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movzxwl:
      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_w(operand, i.InputInt16(index));
      } else {
        __ mov_w(operand, i.InputRegister(index));
      }
      break;
    }
    case kIA32Movl:
      if (instr->HasOutput()) {
        __ mov(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ mov(operand, i.InputImmediate(index));
        } else {
          __ mov(operand, i.InputRegister(index));
        }
      }
      break;
    case kIA32Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kIA32Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kIA32Lea: {
      AddressingMode mode = AddressingModeField::decode(instr->opcode());
      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
      // and addressing mode just happens to work out. The "addl"/"subl" forms
      // in these cases are faster based on measurements.
      if (mode == kMode_MI) {
        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
      } else if (i.InputRegister(0).is(i.OutputRegister())) {
        if (mode == kMode_MRI) {
          int32_t constant_summand = i.InputInt32(1);
          if (constant_summand > 0) {
            __ add(i.OutputRegister(), Immediate(constant_summand));
          } else if (constant_summand < 0) {
            __ sub(i.OutputRegister(), Immediate(-constant_summand));
          }
        } else if (mode == kMode_MR1) {
          if (i.InputRegister(1).is(i.OutputRegister())) {
            __ shl(i.OutputRegister(), 1);
          } else {
            __ lea(i.OutputRegister(), i.MemoryOperand());
          }
        } else if (mode == kMode_M2) {
          __ shl(i.OutputRegister(), 1);
        } else if (mode == kMode_M4) {
          __ shl(i.OutputRegister(), 2);
        } else if (mode == kMode_M8) {
          __ shl(i.OutputRegister(), 3);
        } else {
          __ lea(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        __ lea(i.OutputRegister(), i.MemoryOperand());
      }
      break;
    }
    case kIA32Push:
      if (HasImmediateInput(instr, 0)) {
        __ push(i.InputImmediate(0));
      } else {
        __ push(i.InputOperand(0));
      }
      break;
    case kIA32StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ mov(Operand(object, index, times_1, 0), value);
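      // Turn |index| into the absolute address of the updated slot, which is
      // the form RecordWrite expects.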
      __ lea(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(mov);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
      break;
  }
}


// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  IA32OperandConverter i(this, instr);
  Label::Distance flabel_distance =
      branch->fallthru ? Label::kNear : Label::kFar;
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  // Add a jump if not falling through to the next block.
  if (!branch->fallthru) __ jmp(flabel);
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
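    // For the kUnordered* conditions, parity_odd after a double compare
    // means the operands were ordered (no NaN): the generated code then
    // skips ahead to the setcc sequence at |check|, while the unordered
    // case materializes its fixed answer and jumps straight to |done|.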
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ Move(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ Move(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ Move(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  if (reg.is_byte_register()) {
    // setcc for byte registers (al, bl, cl, dl).
    __ setcc(cc, reg);
    __ movzx_b(reg, reg);
  } else {
    // Emit a branch to set a register to either 1 or 0.
    Label set;
    __ j(cc, &set, Label::kNear);
    __ Move(reg, Immediate(0));
    __ jmp(&done, Label::kNear);
    __ bind(&set);
    __ mov(reg, Immediate(1));
  }
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:

// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp

// --{ push arguments and setup ESI, EDI }--------------------------------------
// | args + receiver | caller frame |
// ^ esp ^ ebp
// [edi = JSFunction, esi = context]

// --{ call [edi + kCodeEntryOffset] }------------------------------------------
// | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ ebp,esp

// --{ push esi }---------------------------------------------------------------
// | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp

// --{ push edi }---------------------------------------------------------------
// | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
// ^esp ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ mov esp, ebp }-----------------------------------------------------------
// | FP | RET | args + receiver | caller frame |
// ^ esp,ebp

// --{ pop ebp }-----------------------------------------------------------
// | | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// --{ ret #A+1 }-----------------------------------------------------------
// | | caller frame |
// ^ esp ^ ebp

// Runtime function calls are accomplished by doing a stub call to the
// CEntryStub (a real code object). On IA32 it passes arguments on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.

// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp

// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
// | args + receiver | caller frame |
// ^ esp ^ ebp
// [eax = #args, ebx = runtime function, esi = context]

// --{ call #CEntryStub }-------------------------------------------------------
// | RET | args + receiver | caller frame |
// ^ esp ^ ebp

// =={ body of runtime function }===============================================

// --{ runtime returns }--------------------------------------------------------
// | caller frame |
// ^ esp ^ ebp

// Other custom linkages (e.g. for calling directly into and out of C++) may
// need to save callee-saved registers on the stack, which is done in the
// function prologue of generated code.

// --{ before the call instruction }--------------------------------------------
// | caller frame |
// ^ esp ^ ebp

// --{ set up arguments in registers and on the stack }-------------------------
// | args | caller frame |
// ^ esp ^ ebp
// [r0 = arg0, r1 = arg1, ...]

// --{ call code }--------------------------------------------------------------
// | RET | args | caller frame |
// ^ esp ^ ebp

// =={ prologue of called function }============================================
// --{ push ebp }---------------------------------------------------------------
// | FP | RET | args | caller frame |
// ^ esp ^ ebp

// --{ mov ebp, esp }-----------------------------------------------------------
// | FP | RET | args | caller frame |
// ^ ebp,esp

// --{ save registers }---------------------------------------------------------
// | regs | FP | RET | args | caller frame |
// ^ esp ^ ebp

// --{ subi esp, #N }-----------------------------------------------------------
// | callee frame | regs | FP | RET | args | caller frame |
// ^esp ^ ebp

// =={ body of called function }================================================

// =={ epilogue of called function }============================================
// --{ restore registers }------------------------------------------------------
// | regs | FP | RET | args | caller frame |
// ^ esp ^ ebp

// --{ mov esp, ebp }-----------------------------------------------------------
// | FP | RET | args | caller frame |
// ^ esp,ebp

// --{ pop ebp }----------------------------------------------------------------
// | RET | args | caller frame |
// ^ esp ^ ebp


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  Frame* frame = this->frame();
  int stack_slots = frame->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // Assemble a prologue similar to the cdecl calling convention.
    __ push(ebp);
    __ mov(ebp, esp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ push(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ sub(esp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(esp, Immediate(stack_slots * kPointerSize));
      }
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ pop(Register::from_code(i));
        }
      }
      __ pop(ebp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
      __ pop(ebp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
    __ pop(ebp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
      Operand dst = g.ToOperand(destination);
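      // A stack-to-stack move has no scratch register available, so go
      // through the machine stack with a push/pop pair.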
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        DCHECK(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ Move(dst, g.ToImmediate(source));
    } else if (src_constant.type() == Constant::kFloat32) {
      // TODO(turbofan): Can we do better here?
      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ Move(dst, Immediate(src));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
      uint32_t lower = static_cast<uint32_t>(src);
      uint32_t upper = static_cast<uint32_t>(src >> 32);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, src);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ Move(dst0, Immediate(lower));
        __ Move(dst1, Immediate(upper));
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ xchg(dst, src);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    __ xchg(g.ToRegister(source), g.ToOperand(destination));
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
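    // Swap via the stack: after the two pushes, the top of the stack holds
    // the source value, so the pops write each value into the other slot.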
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ push(dst);
    __ push(src);
    __ pop(dst);
    __ pop(src);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister reg = g.ToDoubleRegister(source);
    Operand other = g.ToOperand(destination);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.
    Operand src0 = g.ToOperand(source);
    Operand src1 = g.HighOperand(source);
    Operand dst0 = g.ToOperand(destination);
    Operand dst1 = g.HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ push(src0);         // Then use stack to copy source to destination.
    __ pop(dst0);
    __ push(src1);
    __ pop(dst1);
    __ movsd(src0, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8