// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/ppc/macro-assembler-ppc.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r11


// Adds PPC-specific methods to convert InstructionOperands.
class PPCOperandConverter final : public InstructionOperandConverter {
 public:
  PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  RCBit OutputRCBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetRC;
      case kFlags_none:
        return LeaveRC;
    }
    UNREACHABLE();
    return LeaveRC;
  }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
    return false;
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    *mode = AddressingModeField::decode(instr_->opcode());
    switch (*mode) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
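// Note: the two-argument MemoryOperand above advances *first_index past the
// address inputs it consumes (two for both MRI and MRR); the store macros
// below rely on this to locate trailing inputs such as the stored value.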


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadNAN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadNAN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ li(result_, Operand::Zero()); }

 private:
  Register const result_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (mode_ > RecordWriteMode::kValueIsMap) {
      __ CheckPageFlag(value_, scratch0_,
                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
                       exit());
    }
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    // TODO(turbofan): Once we get frame elision working, we need to save
    // and restore lr properly here if the frame was elided.
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         EMIT_REMEMBERED_SET, save_fp_mode);
    __ add(scratch1_, object_, offset_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Register const offset_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};
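// A sketch of the intent behind the mode checks in Generate(): a Smi value
// never needs a write barrier, so the kValueIsAny case filters Smis first;
// values already known to be maps skip that test; and the page-flag check
// skips the stub call when the value's page is of no interest to the
// incremental marker.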


Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for add/sub only.
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add:
        case kPPC_Sub:
          return lt;
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
#if V8_TARGET_ARCH_PPC64
          return ne;
#else
          return lt;
#endif
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add:
        case kPPC_Sub:
          return ge;
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
#if V8_TARGET_ARCH_PPC64
          return eq;
#else
          return ge;
#endif
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}
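// A note on the overflow mappings above (an assumption based on the macro
// assembler helpers used below): Add/SubAndCheckForOverflow leave the
// overflow indication in the sign of a scratch register that is compared
// against zero, so "lt" reads as overflow, while the 32-bit variants on
// PPC64 are validated with TestIfInt32, where "ne" reads as overflow.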

}  // namespace

#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr)                            \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
  } while (0)


#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr)                           \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit());  \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)


#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)


#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW32()           \
  do {                                           \
    ASSEMBLE_BINOP(add, addi);                   \
    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
  } while (0)


#define ASSEMBLE_SUB_WITH_OVERFLOW32()           \
  do {                                           \
    ASSEMBLE_BINOP(sub, subi);                   \
    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
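// On PPC64 the 32-bit overflow checks run on the full 64-bit result:
// TestIfInt32 compares the result with its own sign-extended low word, so
// cr0 reads "ne" exactly when the value no longer fits in 32 bits, which is
// the mapping FlagsConditionToCondition applies for these opcodes.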


#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)


#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)


#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           i.OutputRCBit());                                         \
  } while (0)
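// ASSEMBLE_MODULO derives the remainder from the truncated quotient:
//   remainder = dividend - (dividend / divisor) * divisor
// so, e.g., kPPC_Mod32 expands to divw/mullw/sub and the result takes the
// sign of the dividend, matching C's % operator.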


#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
  } while (0)


#define ASSEMBLE_FLOAT_MAX(scratch_reg)                                       \
  do {                                                                        \
    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0),  \
            i.InputDoubleRegister(1));                                        \
  } while (0)


#define ASSEMBLE_FLOAT_MIN(scratch_reg)                                       \
  do {                                                                        \
    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1),  \
            i.InputDoubleRegister(0));                                        \
  } while (0)
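// Both helpers rely on fsel's select-on-sign semantics, roughly
//   fsel(dst, cmp, a, b)  =>  dst = (cmp >= 0.0) ? a : b
// so MAX picks in0 when in0 - in1 >= 0 and MIN swaps the selected operands.
// Note this is not IEEE-correct for NaN inputs, which fsel treats like a
// negative comparison value.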


#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    if (mode == kMode_MRI) {                          \
      __ asm_instr(result, operand);                  \
    } else {                                          \
      __ asm_instrx(result, operand);                 \
    }                                                 \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
  } while (0)


#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                               \
    Register result = i.OutputRegister();            \
    AddressingMode mode = kMode_None;                \
    MemOperand operand = i.MemoryOperand(&mode);     \
    if (mode == kMode_MRI) {                         \
      __ asm_instr(result, operand);                 \
    } else {                                         \
      __ asm_instrx(result, operand);                \
    }                                                \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
  } while (0)


#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ frsp(kScratchDoubleReg, value);                   \
    if (mode == kMode_MRI) {                             \
      __ stfs(kScratchDoubleReg, operand);               \
    } else {                                             \
      __ stfsx(kScratchDoubleReg, operand);              \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) {                             \
      __ stfd(value, operand);                           \
    } else {                                             \
      __ stfdx(value, operand);                          \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    DCHECK_EQ(kMode_MRR, mode);                                    \
    Register offset = operand.rb();                                \
    __ extsw(offset, offset);                                      \
    if (HasRegisterInput(instr, 2)) {                              \
      __ cmplw(offset, i.InputRegister(2));                        \
    } else {                                                       \
      __ cmplwi(offset, i.InputImmediate(2));                      \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    if (mode == kMode_MRI) {                                       \
      __ asm_instr(result, operand);                               \
    } else {                                                       \
      __ asm_instrx(result, operand);                              \
    }                                                              \
    __ bind(ool->exit());                                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
  } while (0)
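// The checked accesses here and below share one bounds-check idiom: the
// offset is sign-extended, then compared *unsigned* (cmplw/cmplwi) against
// the length, so a negative offset becomes a huge unsigned value and takes
// the bge branch to the out-of-line (or skip-store) path.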


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    DCHECK_EQ(kMode_MRR, mode);                              \
    Register offset = operand.rb();                          \
    __ extsw(offset, offset);                                \
    if (HasRegisterInput(instr, 2)) {                        \
      __ cmplw(offset, i.InputRegister(2));                  \
    } else {                                                 \
      __ cmplwi(offset, i.InputImmediate(2));                \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(result, operand);                         \
    } else {                                                 \
      __ asm_instrx(result, operand);                        \
    }                                                        \
    __ bind(ool->exit());                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    __ extsw(offset, offset);                           \
    if (HasRegisterInput(instr, 2)) {                   \
      __ cmplw(offset, i.InputRegister(2));             \
    } else {                                            \
      __ cmplwi(offset, i.InputImmediate(2));           \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ frsp(kScratchDoubleReg, value);                  \
    if (mode == kMode_MRI) {                            \
      __ stfs(kScratchDoubleReg, operand);              \
    } else {                                            \
      __ stfsx(kScratchDoubleReg, operand);             \
    }                                                   \
    __ bind(&done);                                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    __ extsw(offset, offset);                           \
    if (HasRegisterInput(instr, 2)) {                   \
      __ cmplw(offset, i.InputRegister(2));             \
    } else {                                            \
      __ cmplwi(offset, i.InputImmediate(2));           \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    if (mode == kMode_MRI) {                            \
      __ stfd(value, operand);                          \
    } else {                                            \
      __ stfdx(value, operand);                         \
    }                                                   \
    __ bind(&done);                                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                        \
    Label done;                                               \
    size_t index = 0;                                         \
    AddressingMode mode = kMode_None;                         \
    MemOperand operand = i.MemoryOperand(&mode, index);       \
    DCHECK_EQ(kMode_MRR, mode);                               \
    Register offset = operand.rb();                           \
    __ extsw(offset, offset);                                 \
    if (HasRegisterInput(instr, 2)) {                         \
      __ cmplw(offset, i.InputRegister(2));                   \
    } else {                                                  \
      __ cmplwi(offset, i.InputImmediate(2));                 \
    }                                                         \
    __ bge(&done);                                            \
    Register value = i.InputRegister(3);                      \
    if (mode == kMode_MRI) {                                  \
      __ asm_instr(value, operand);                           \
    } else {                                                  \
      __ asm_instrx(value, operand);                          \
    }                                                         \
    __ bind(&done);                                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
  } while (0)


void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    if (FLAG_enable_embedded_constant_pool) {
      __ LoadP(kConstantPoolRegister,
               MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    }
    __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ mtlr(r0);
  }
  frame_access_state()->SetFrameAccessToSP();
}
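// Taken together, a sketch of the tail-call protocol as used here: a
// positive slot delta means the callee needs less stack, so the record is
// popped in AssembleDeconstructActivationRecord before the jump, while a
// negative delta makes AssemblePrepareTailCall grow the stack and, when a
// frame exists, reload the caller's pc, fp, and constant pool so the tail
// callee sees the caller's frame state.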


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());

  switch (opcode) {
    case kArchCallCodeObject: {
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          masm());
      EnsureSpaceForLazyDeopt();
      if (HasRegisterInput(instr, 0)) {
        __ addi(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      } else {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (HasRegisterInput(instr, 0)) {
        __ addi(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Jump(ip);
      } else {
        // We cannot use the constant pool to load the target since
        // we've already restored the caller's frame.
        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          masm());
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(ip);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchLazyBailout: {
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          masm());
      EnsureSpaceForLazyDeopt();
      RecordCallPosition(instr);
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchStackPointer:
      __ mr(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchFramePointer:
      __ mr(i.OutputRegister(), fp);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchTruncateDoubleToI:
      // TODO(mbrandy): move slow call to stub out of line.
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register offset = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                   scratch0, scratch1, mode);
      __ StorePX(value, MemOperand(object, offset));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kPPC_And:
      if (HasRegisterInput(instr, 1)) {
        __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
      }
      break;
    case kPPC_AndComplement:
      __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.OutputRCBit());
      break;
    case kPPC_Or:
      if (HasRegisterInput(instr, 1)) {
        __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      } else {
        __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_OrComplement:
      __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputRCBit());
      break;
    case kPPC_Xor:
      if (HasRegisterInput(instr, 1)) {
        __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_ShiftLeft32:
      ASSEMBLE_BINOP_RC(slw, slwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftLeft64:
      ASSEMBLE_BINOP_RC(sld, sldi);
      break;
#endif
    case kPPC_ShiftRight32:
      ASSEMBLE_BINOP_RC(srw, srwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRight64:
      ASSEMBLE_BINOP_RC(srd, srdi);
      break;
#endif
    case kPPC_ShiftRightAlg32:
      ASSEMBLE_BINOP_INT_RC(sraw, srawi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRightAlg64:
      ASSEMBLE_BINOP_INT_RC(srad, sradi);
      break;
#endif
    case kPPC_RotRight32:
      if (HasRegisterInput(instr, 1)) {
        __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
        __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
                 i.OutputRCBit());
      } else {
        int sh = i.InputInt32(1);
        __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      }
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_RotRight64:
      if (HasRegisterInput(instr, 1)) {
        __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
        __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
                 i.OutputRCBit());
      } else {
        int sh = i.InputInt32(1);
        __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      }
      break;
#endif
    case kPPC_Not:
      __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndMask32:
      __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_RotLeftAndClear64:
      __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
               63 - i.InputInt32(2), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndClearLeft64:
      __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndClearRight64:
      __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
      break;
#endif
    case kPPC_Add:
#if V8_TARGET_ARCH_PPC64
      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
        ASSEMBLE_ADD_WITH_OVERFLOW();
      } else {
#endif
        if (HasRegisterInput(instr, 1)) {
          __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 LeaveOE, i.OutputRCBit());
        } else {
          __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
          DCHECK_EQ(LeaveRC, i.OutputRCBit());
        }
#if V8_TARGET_ARCH_PPC64
      }
#endif
      break;
    case kPPC_AddWithOverflow32:
      ASSEMBLE_ADD_WITH_OVERFLOW32();
      break;
    case kPPC_AddDouble:
      ASSEMBLE_FLOAT_BINOP_RC(fadd);
      break;
    case kPPC_Sub:
#if V8_TARGET_ARCH_PPC64
      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
        ASSEMBLE_SUB_WITH_OVERFLOW();
      } else {
#endif
        if (HasRegisterInput(instr, 1)) {
          __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 LeaveOE, i.OutputRCBit());
        } else {
          __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
          DCHECK_EQ(LeaveRC, i.OutputRCBit());
        }
#if V8_TARGET_ARCH_PPC64
      }
#endif
      break;
    case kPPC_SubWithOverflow32:
      ASSEMBLE_SUB_WITH_OVERFLOW32();
      break;
    case kPPC_SubDouble:
      ASSEMBLE_FLOAT_BINOP_RC(fsub);
      break;
    case kPPC_Mul32:
      __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Mul64:
      __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      break;
#endif
    case kPPC_MulHigh32:
      __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      break;
    case kPPC_MulHighU32:
      __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      break;
    case kPPC_MulDouble:
      ASSEMBLE_FLOAT_BINOP_RC(fmul);
      break;
    case kPPC_Div32:
      __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Div64:
      __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_DivU32:
      __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_DivU64:
      __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_DivDouble:
      ASSEMBLE_FLOAT_BINOP_RC(fdiv);
      break;
    case kPPC_Mod32:
      ASSEMBLE_MODULO(divw, mullw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Mod64:
      ASSEMBLE_MODULO(divd, mulld);
      break;
#endif
    case kPPC_ModU32:
      ASSEMBLE_MODULO(divwu, mullw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ModU64:
      ASSEMBLE_MODULO(divdu, mulld);
      break;
#endif
    case kPPC_ModDouble:
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      ASSEMBLE_FLOAT_MODULO();
      break;
    case kPPC_Neg:
      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
      break;
    case kPPC_MaxDouble:
      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
      break;
    case kPPC_MinDouble:
      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
      break;
    case kPPC_AbsDouble:
      ASSEMBLE_FLOAT_UNOP_RC(fabs);
      break;
    case kPPC_SqrtDouble:
      ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
      break;
    case kPPC_FloorDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frim);
      break;
    case kPPC_CeilDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frip);
      break;
    case kPPC_TruncateDouble:
      ASSEMBLE_FLOAT_UNOP_RC(friz);
      break;
    case kPPC_RoundDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frin);
      break;
    case kPPC_NegDouble:
      ASSEMBLE_FLOAT_UNOP_RC(fneg);
      break;
    case kPPC_Cntlz32:
      __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Cntlz64:
      __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Popcnt32:
      __ popcntw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Popcnt64:
      __ popcntd(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Cmp32:
      ASSEMBLE_COMPARE(cmpw, cmplw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Cmp64:
      ASSEMBLE_COMPARE(cmp, cmpl);
      break;
#endif
    case kPPC_CmpDouble:
      ASSEMBLE_FLOAT_COMPARE(fcmpu);
      break;
    case kPPC_Tst32:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
#if V8_TARGET_ARCH_PPC64
      __ extsw(r0, r0, i.OutputRCBit());
#endif
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Push:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_PushFrame: {
      int num_slots = i.InputInt32(1);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ stfdu(i.InputDoubleRegister(0),
                 MemOperand(sp, -num_slots * kPointerSize));
      } else {
        __ StorePU(i.InputRegister(0),
                   MemOperand(sp, -num_slots * kPointerSize));
      }
      break;
    }
    case kPPC_StoreToStackSlot: {
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
      } else {
        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
      }
      break;
    }
    case kPPC_ExtendSignWord8:
      __ extsb(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_ExtendSignWord16:
      __ extsh(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ExtendSignWord32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToUint64:
      // Zero extend
      __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToInt32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToFloat32:
      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToDouble:
      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint64ToFloat32:
      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
                                     i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint64ToDouble:
      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
                                      i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Int32ToDouble:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToDouble:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleToInt32:
    case kPPC_DoubleToUint32:
    case kPPC_DoubleToInt64: {
#if V8_TARGET_ARCH_PPC64
      bool check_conversion =
          (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
      if (check_conversion) {
        __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
      }
#endif
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_PPC64
                              kScratchReg,
#endif
                              i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_PPC64
      if (check_conversion) {
        // Set 2nd output to zero if conversion fails.
        CRegister cr = cr7;
        int crbit = v8::internal::Assembler::encode_crbit(
            cr, static_cast<CRBit>(VXCVI % CRWIDTH));
        __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
        if (CpuFeatures::IsSupported(ISELECT)) {
          __ li(i.OutputRegister(1), Operand(1));
          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
        } else {
          __ li(i.OutputRegister(1), Operand::Zero());
          __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
          __ li(i.OutputRegister(1), Operand(1));
        }
      }
#endif
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
#if V8_TARGET_ARCH_PPC64
    case kPPC_DoubleToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      if (check_conversion) {
        __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
      }
      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
                                      i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        // Set 2nd output to zero if conversion fails.
        CRegister cr = cr7;
        int crbit = v8::internal::Assembler::encode_crbit(
            cr, static_cast<CRBit>(VXCVI % CRWIDTH));
        __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
        if (CpuFeatures::IsSupported(ISELECT)) {
          __ li(i.OutputRegister(1), Operand(1));
          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
        } else {
          __ li(i.OutputRegister(1), Operand::Zero());
          __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
          __ li(i.OutputRegister(1), Operand(1));
        }
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
#endif
    case kPPC_DoubleToFloat32:
      ASSEMBLE_FLOAT_UNOP_RC(frsp);
      break;
    case kPPC_Float32ToDouble:
      // Nothing to do.
      __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleExtractLowWord32:
      __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleExtractHighWord32:
      __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleInsertLowWord32:
      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleInsertHighWord32:
      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleConstruct:
#if V8_TARGET_ARCH_PPC64
      __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
                                    i.InputRegister(0), i.InputRegister(1), r0);
#else
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
                          i.InputRegister(1));
#endif
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_BitcastFloat32ToInt32:
      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kPPC_BitcastInt32ToFloat32:
      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_BitcastDoubleToInt64:
      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kPPC_BitcastInt64ToDouble:
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#endif
    case kPPC_LoadWordU8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      break;
    case kPPC_LoadWordS8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kPPC_LoadWordU16:
      ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
      break;
    case kPPC_LoadWordS16:
      ASSEMBLE_LOAD_INTEGER(lha, lhax);
      break;
    case kPPC_LoadWordS32:
      ASSEMBLE_LOAD_INTEGER(lwa, lwax);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_LoadWord64:
      ASSEMBLE_LOAD_INTEGER(ld, ldx);
      break;
#endif
    case kPPC_LoadFloat32:
      ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
      break;
    case kPPC_LoadDouble:
      ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
      break;
    case kPPC_StoreWord8:
      ASSEMBLE_STORE_INTEGER(stb, stbx);
      break;
    case kPPC_StoreWord16:
      ASSEMBLE_STORE_INTEGER(sth, sthx);
      break;
    case kPPC_StoreWord32:
      ASSEMBLE_STORE_INTEGER(stw, stwx);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_StoreWord64:
      ASSEMBLE_STORE_INTEGER(std, stdx);
      break;
#endif
    case kPPC_StoreFloat32:
      ASSEMBLE_STORE_FLOAT32();
      break;
    case kPPC_StoreDouble:
      ASSEMBLE_STORE_DOUBLE();
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
      break;
    case kCheckedLoadWord64:
#if V8_TARGET_ARCH_PPC64
      ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
#else
      UNREACHABLE();
#endif
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
      break;
    case kCheckedStoreWord64:
#if V8_TARGET_ARCH_PPC64
      ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
#else
      UNREACHABLE();
#endif
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT32();
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_DOUBLE();
      break;
    default:
      UNREACHABLE();
      break;
  }
}  // NOLINT(readability/fn_size)


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;
  CRegister cr = cr0;

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(flabel, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}
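// Why the explicit unordered branches above: an unordered fcmpu result sets
// only the FU bit in cr0 and clears lt/gt/eq, so a branch like ble (taken
// unless gt is set) would otherwise treat a NaN comparison as true. The
// extra bunordered routes NaNs to whichever label matches the condition's
// intended unordered behavior.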


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  PPCOperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  int reg_value = -1;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      reg_value = 0;
      __ li(reg, Operand::Zero());
      __ bunordered(&done, cr);
    } else if (cond == gt) {
      reg_value = 1;
      __ li(reg, Operand(1));
      __ bunordered(&done, cr);
    }
    // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
  }

  if (CpuFeatures::IsSupported(ISELECT)) {
    switch (cond) {
      case eq:
      case lt:
      case gt:
        if (reg_value != 1) __ li(reg, Operand(1));
        __ li(kScratchReg, Operand::Zero());
        __ isel(cond, reg, reg, kScratchReg, cr);
        break;
      case ne:
      case ge:
      case le:
        if (reg_value != 1) __ li(reg, Operand(1));
        // r0 implies logical zero in this form
        __ isel(NegateCondition(cond), reg, r0, reg, cr);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    if (reg_value != 0) __ li(reg, Operand::Zero());
    __ b(NegateCondition(cond), &done, cr);
    __ li(reg, Operand(1));
  }
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
    __ beq(GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ Cmpli(input, Operand(case_count), r0);
  __ bge(GetLabel(i.InputRpo(1)));
  __ mov_label_addr(kScratchReg, table);
  __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
  __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
  __ Jump(kScratchReg);
}
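// The emitted table switch is, in effect:
//   if ((uint32_t)input >= case_count) goto default;  // Cmpli + bge
//   goto *table[input];                               // scaled load + Jump
// with the label-address table itself emitted by AssembleJumpTable below.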


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    __ function_descriptor();
    __ mflr(r0);
    if (FLAG_enable_embedded_constant_pool) {
      __ Push(r0, fp, kConstantPoolRegister);
      // Adjust FP to point to saved FP.
      __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
    } else {
      __ Push(r0, fp);
      __ mr(fp, sp);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
  } else if (frame()->needs_frame()) {
    if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
      // TODO(mbrandy): Restrict only to the wasm wrapper case.
      __ StubPrologue();
    } else {
      __ StubPrologue(ip);
    }
  } else {
    frame()->SetElidedFrameSizeInSlots(0);
  }
  frame_access_state()->SetFrameAccessToDefault();

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the
    // unoptimized frame is still on the stack. Optimized code uses OSR values
    // directly from the unoptimized frame. Thus, all that needs to be done is
    // to allocate the remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  if (stack_shrink_slots > 0) {
    __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
                                              (kDoubleSize / kPointerSize));
  }

  // Save callee-saved registers.
  const RegList saves =
      FLAG_enable_embedded_constant_pool
          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
          : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
    const int num_saves =
        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
    DCHECK(num_saves == base::bits::CountPopulation32(saves));
    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore registers.
  const RegList saves =
      FLAG_enable_embedded_constant_pool
          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
          : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }

  if (descriptor->IsCFunctionCall()) {
    __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ b(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
    }
  } else {
    __ Drop(pop_count);
  }
  __ Ret();
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ mov(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int offset;
          if (IsMaterializableFromFrame(src_object, &offset)) {
            __ LoadP(dst, MemOperand(fp, offset));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                        : src.ToFloat64();
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ StoreDouble(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
    } else {
      DoubleRegister temp = kScratchDoubleReg;
      __ LoadDouble(temp, src, r0);
      __ StoreDouble(temp, g.ToMemOperand(destination), r0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mr(temp, src);
      __ mr(src, dst);
      __ mr(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mr(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_PPC64
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ fmr(temp, src);
      __ fmr(src, dst);
      __ fmr(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ fmr(temp, src);
      __ lfd(src, dst);
      __ stfd(temp, dst);
    }
#if !V8_TARGET_ARCH_PPC64
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lfd(temp_0, src);
    __ lfd(temp_1, dst);
    __ stfd(temp_0, dst);
    __ stfd(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // We do not insert nops for inlined Smi code.
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of the padding.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}
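// Note: the nop padding above keeps at least Deoptimizer::patch_size()
// bytes between consecutive lazy-bailout points, so patching a call over an
// earlier bailout site cannot overwrite the code emitted after it.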

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8