// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

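  // Returns an unallocated operand fixed to the given hardware register,
  // for use as an explicitly named temporary.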
  InstructionOperand* TempRegister(Register reg) {
    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                           Register::ToAllocationIndex(reg));
  }

  InstructionOperand* UseByteRegister(Node* node) {
    // TODO(dcarney): relax constraint.
    return UseFixed(node, rdx);
  }

  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      default:
        return false;
    }
  }

  bool CanBeImmediate64(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      case IrOpcode::kNumberConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new generation.
        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
        return !isolate()->heap()->InNewSpace(*value.handle());
      }
      default:
        return false;
    }
  }
};


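// Selects the load opcode from the loaded representation, then picks the
// cheapest available addressing mode for the base and index inputs.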
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  // TODO(titzer): signed/unsigned small loads
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    // load [#base + %index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {  // load [%base + %index + K]
    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


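// Selects the store opcode from the stored representation. Tagged stores
// that require a full write barrier are emitted as a single
// kX64StoreWriteBarrier instruction with fixed input registers.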
void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  InstructionOperand* val;
  if (g.CanBeImmediate(value)) {
    val = g.UseImmediate(value);
  } else if (rep == kRepWord8 || rep == kRepBit) {
    val = g.UseByteRegister(value);
  } else {
    val = g.UseRegister(value);
  }
  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kX64Movb;
      break;
    case kRepWord16:
      opcode = kX64Movw;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    // store [#base + %index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(index), g.UseImmediate(base), val);
  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else {  // store [%base + %index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
  // this might be the last use and therefore its register can be reused.
  if (g.CanBeImmediate(m.right().node())) {
    inputs[input_count++] = g.Use(m.left().node());
    inputs[input_count++] = g.UseImmediate(m.right().node());
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.Use(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kX64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
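  // "x ^ -1" is "not x", which has a shorter one-operand form.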
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
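  // As in the 32-bit case, "x ^ -1" is "not x".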
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
static void VisitWord32Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
282 if (m.right().IsWord32And()) {
283 Int32BinopMatcher mright(right);
284 if (mright.right().Is(0x1F)) {
285 right = mright.left().node();
286 }
287 }
288 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
289 g.UseFixed(right, rcx));
290 }
291}
292
293
294// Shared routine for multiple 64-bit shift operations.
295// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
296static void VisitWord64Shift(InstructionSelector* selector, Node* node,
297 ArchOpcode opcode) {
298 X64OperandGenerator g(selector);
299 Node* left = node->InputAt(0);
300 Node* right = node->InputAt(1);
301
302 // TODO(turbofan): assembler only supports some addressing modes for shifts.
303 if (g.CanBeImmediate(right)) {
304 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
305 g.UseImmediate(right));
306 } else {
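    // 64-bit shifts mask the count to 6 bits, so "count & 0x3F" is redundant.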
    Int64BinopMatcher m(node);
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
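  // "0 - x" is "neg x", which has a shorter one-operand form.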
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
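  // As in the 32-bit case, "0 - x" is "neg x".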
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}


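// Shared routine for multiplication. When one operand is an immediate, the
// three-operand form of imul writes a fresh register; otherwise the output
// must be tied to the first input.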
static void VisitMul(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else if (g.CanBeImmediate(left)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
                   g.UseImmediate(left));
  } else {
    // TODO(turbofan): select better left operand.
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


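// Shared routine for division. x64 (i)div takes its dividend in rdx:rax and
// leaves the quotient in rax, so the operands are fixed to those registers
// and rdx is marked as clobbered.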
static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


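// Shared routine for modulus. Same fixed registers as division, but the
// result is the remainder, which (i)div leaves in rdx.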
static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


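// A 32-bit move implicitly zero-extends to 64 bits on x64, so kX64Movl is
// all that is needed here.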
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


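// Truncation is likewise a 32-bit move, which discards the upper half.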
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
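  // rax is reserved as a scratch for the code generator's expansion of this
  // pseudo-instruction (an x87 fprem loop, which uses ax for fnstsw).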
  InstructionOperand* temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kX64Add32, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kX64Sub32, cont);
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kX64Test32, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
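  // Fallback: "test node, -1" masks with all ones, so the zero flag is set
  // iff the value itself is zero.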
  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt64Sub:
      return VisitWordCompare(this, node, kX64Cmp, cont, false);
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kX64Test, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}


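// Generates a call: stack arguments are pushed first, then a single call
// instruction is emitted from the operands collected in the CallBuffer.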
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  X64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        call->InputAt(static_cast<int>(descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true);

  // TODO(dcarney): stack alignment for c calls.
  // TODO(dcarney): shadow space on windows for c calls.
  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    // TODO(titzer): handle pushing double parameters.
    Emit(kX64Push, NULL,
         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8