// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/s390/macro-assembler-s390.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

#define kScratchReg ip

// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
 public:
  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
    return false;
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    *mode = AddressingModeField::decode(instr_->opcode());
    switch (*mode) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};

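// Note: MemoryOperand() advances *first_index past the operands it consumes,
// so a caller decoding, e.g., a kMode_MRI store sees the index move from 0 to
// 2 and then reads the stored value as InputRegister(2). (Illustrative
// sketch of the calling convention used by the store macros below, not an
// exhaustive contract.)
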
static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister();
}

namespace {

class OutOfLineLoadNAN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};

class OutOfLineLoadNAN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};

class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ LoadImmP(result_, Operand::Zero()); }

 private:
  Register const result_;
};

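// Out-of-line path for the write barrier: the fast path emitted by
// kArchStoreWithWriteBarrier below only jumps here when the page flags
// indicate the store may need remembering. A rough sketch of the emitted
// flow:
//   StoreP value, [object + offset]
//   CheckPageFlag object, kPointersFromHereAreInterestingMask, ne -> entry()
//   ...Generate() then filters out Smis and uninteresting targets, calls
//   RecordWriteStub, and returns to exit().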
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    if (offset_.is(no_reg)) {
      __ AddP(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddP(scratch1_, object_, offset_);
    }
    __ CallStub(&stub);
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};

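// Maps a TurboFan FlagsCondition plus the producing opcode onto an S390
// condition-code mask. The opcode only matters for overflow conditions: see
// the note before the 32-bit overflow macros below for why the mapping
// differs between the 31-bit and 64-bit builds.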
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for AddP/SubP only.
      switch (op) {
#if V8_TARGET_ARCH_S390X
        case kS390_Add:
        case kS390_Sub:
          return lt;
#endif
        case kS390_AddWithOverflow32:
        case kS390_SubWithOverflow32:
#if V8_TARGET_ARCH_S390X
          return ne;
#else
          return lt;
#endif
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
#if V8_TARGET_ARCH_S390X
        case kS390_Add:
        case kS390_Sub:
          return ge;
#endif
        case kS390_AddWithOverflow32:
        case kS390_SubWithOverflow32:
#if V8_TARGET_ARCH_S390X
          return eq;
#else
          return ge;
#endif
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace

#define ASSEMBLE_FLOAT_UNOP(asm_instr) \
  do { \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP(asm_instr) \
  do { \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1)); \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
  do { \
    if (HasRegisterInput(instr, 1)) { \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1)); \
    } else { \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1)); \
    } \
  } while (0)

#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm) \
  do { \
    if (HasRegisterInput(instr, 1)) { \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1)); \
    } else { \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1)); \
    } \
  } while (0)

#define ASSEMBLE_ADD_WITH_OVERFLOW() \
  do { \
    if (HasRegisterInput(instr, 1)) { \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0); \
    } else { \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0); \
    } \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW() \
  do { \
    if (HasRegisterInput(instr, 1)) { \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0); \
    } else { \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0); \
    } \
  } while (0)

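// On 64-bit builds the 32-bit overflow operations below compute the full
// result with a 64-bit AddP/SubP and then use TestIfInt32 to set the
// condition code: the operation overflowed 32 bits iff the result no longer
// fits in a sign-extended 32-bit value. This is why FlagsConditionToCondition
// above maps kOverflow to ne for these opcodes (the cc comes from the test)
// but to lt for the plain 64-bit kS390_Add/kS390_Sub.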
#if V8_TARGET_ARCH_S390X
#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
  do { \
    ASSEMBLE_BINOP(AddP, AddP); \
    __ TestIfInt32(i.OutputRegister(), r0); \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
  do { \
    ASSEMBLE_BINOP(SubP, SubP); \
    __ TestIfInt32(i.OutputRegister(), r0); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif

#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
  do { \
    if (HasRegisterInput(instr, 1)) { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
      } \
    } else { \
      if (i.CompareLogical()) { \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else { \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } \
    } \
  } while (0)

#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
  do { \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
  } while (0)

// The divide instruction (e.g. dr) implicitly uses the register pair r0:r1:
// r0:r1 = r1 / divisor, leaving the quotient in r1 and the remainder in r0.
// Copy the remainder to the output register.
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do { \
    __ LoadRR(r0, i.InputRegister(0)); \
    __ shift_instr(r0, Operand(32)); \
    __ div_instr(r0, i.InputRegister(1)); \
    __ ltr(i.OutputRegister(), r0); \
  } while (0)
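
// For example, kS390_Mod32 below instantiates this as ASSEMBLE_MODULO(dr,
// srda), which roughly emits (sketch):
//   LoadRR r0, src       // copy dividend into the even register of the pair
//   srda   r0, 32        // shift/sign-extend the dividend into r0:r1
//   dr     r0, divisor   // r1 = quotient, r0 = remainder
//   ltr    out, r0       // copy remainder to the output, setting the cc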

#define ASSEMBLE_FLOAT_MODULO() \
  do { \
    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)

#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
  do { \
    Label ge, done; \
    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    __ bge(&ge, Label::kNear); \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
    __ b(&done, Label::kNear); \
    __ bind(&ge); \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ bind(&done); \
  } while (0)

#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
  do { \
    Label ge, done; \
    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    __ bge(&ge, Label::kNear); \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    __ b(&done, Label::kNear); \
    __ bind(&ge); \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
    __ bind(&done); \
  } while (0)

// Only MRI mode is available for these instructions.
#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
  do { \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand); \
  } while (0)

#define ASSEMBLE_LOAD_INTEGER(asm_instr) \
  do { \
    Register result = i.OutputRegister(); \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand); \
  } while (0)

#define ASSEMBLE_STORE_FLOAT32() \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreFloat32(value, operand); \
  } while (0)

#define ASSEMBLE_STORE_DOUBLE() \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreDouble(value, operand); \
  } while (0)

#define ASSEMBLE_STORE_INTEGER(asm_instr) \
  do { \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index); \
    __ asm_instr(value, operand); \
  } while (0)

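// The checked memory ops below all follow the same pattern: sign-fix the
// 32-bit offset (lgfr; see the TODO about garbage in the upper bits),
// compare it unsigned against the length operand, and either branch to an
// out-of-line stub (NaN for float loads, zero for integer loads) or skip the
// store entirely when the access is out of bounds.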
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
  do { \
    DoubleRegister result = i.OutputDoubleRegister(); \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb(); \
    __ lgfr(offset, offset); \
    if (HasRegisterInput(instr, 2)) { \
      __ CmpLogical32(offset, i.InputRegister(2)); \
    } else { \
      __ CmpLogical32(offset, i.InputImmediate(2)); \
    } \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry()); \
    __ asm_instr(result, operand); \
    __ bind(ool->exit()); \
  } while (0)

// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
  do { \
    Register result = i.OutputRegister(); \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb(); \
    __ lgfr(offset, offset); \
    if (HasRegisterInput(instr, 2)) { \
      __ CmpLogical32(offset, i.InputRegister(2)); \
    } else { \
      __ CmpLogical32(offset, i.InputImmediate(2)); \
    } \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry()); \
    __ asm_instr(result, operand); \
    __ bind(ool->exit()); \
  } while (0)

// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
  do { \
    Label done; \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb(); \
    __ lgfr(offset, offset); \
    if (HasRegisterInput(instr, 2)) { \
      __ CmpLogical32(offset, i.InputRegister(2)); \
    } else { \
      __ CmpLogical32(offset, i.InputImmediate(2)); \
    } \
    __ bge(&done); \
    DoubleRegister value = i.InputDoubleRegister(3); \
    __ StoreFloat32(value, operand); \
    __ bind(&done); \
  } while (0)

// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
  do { \
    Label done; \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode); \
    Register offset = operand.rb(); \
    __ lgfr(offset, offset); \
    if (HasRegisterInput(instr, 2)) { \
      __ CmpLogical32(offset, i.InputRegister(2)); \
    } else { \
      __ CmpLogical32(offset, i.InputImmediate(2)); \
    } \
    __ bge(&done); \
    DoubleRegister value = i.InputDoubleRegister(3); \
    __ StoreDouble(value, operand); \
    __ bind(&done); \
  } while (0)

// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do { \
    Label done; \
    size_t index = 0; \
    AddressingMode mode = kMode_None; \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb(); \
    __ lgfr(offset, offset); \
    if (HasRegisterInput(instr, 2)) { \
      __ CmpLogical32(offset, i.InputRegister(2)); \
    } else { \
      __ CmpLogical32(offset, i.InputImmediate(2)); \
    } \
    __ bge(&done); \
    Register value = i.InputRegister(3); \
    __ asm_instr(value, operand); \
    __ bind(&done); \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}

void CodeGenerator::AssembleSetupStackPointer() {}

void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}

void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}

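// Before a tail call out of a JS function, a live arguments-adaptor frame
// (pushed when the function was called with a mismatched argument count)
// must be popped along with the caller's frame, or it would leak. The check
// below is cheap: adaptor frames are tagged with the ARGUMENTS_ADAPTOR
// sentinel in their context slot.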
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  S390OperandConverter i(this, instr);
  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());

  switch (opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasRegisterInput(instr, 0)) {
        __ AddP(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      } else {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (HasRegisterInput(instr, 0)) {
        __ AddP(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Jump(ip);
      } else {
        // We cannot use the constant pool to load the target since
        // we've already restored the caller's frame.
        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ CmpP(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction:
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ CmpP(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (opcode == kArchTailCallJSFunctionFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(ip);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ LoadRR(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ LoadRR(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ LoadRR(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      // TODO(mbrandy): move slow call to stub out of line.
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      OutOfLineRecordWrite* ool;

      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      if (addressing_mode == kMode_MRI) {
        int32_t offset = i.InputInt32(1);
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      } else {
        DCHECK_EQ(kMode_MRR, addressing_mode);
        Register offset(i.InputRegister(1));
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
              Operand(offset.offset()));
      break;
    }
    case kS390_And:
      ASSEMBLE_BINOP(AndP, AndP);
      break;
    case kS390_AndComplement:
      __ NotP(i.InputRegister(1));
      __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kS390_Or:
      ASSEMBLE_BINOP(OrP, OrP);
      break;
    case kS390_OrComplement:
      __ NotP(i.InputRegister(1));
      __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kS390_Xor:
      ASSEMBLE_BINOP(XorP, XorP);
      break;
    case kS390_ShiftLeft32:
      if (HasRegisterInput(instr, 1)) {
        if (i.OutputRegister().is(i.InputRegister(1)) &&
            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
          __ LoadRR(kScratchReg, i.InputRegister(1));
          __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
        } else {
          ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
        }
      } else {
        ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
      }
      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftLeft64:
      ASSEMBLE_BINOP(sllg, sllg);
      break;
#endif
    case kS390_ShiftRight32:
      if (HasRegisterInput(instr, 1)) {
        if (i.OutputRegister().is(i.InputRegister(1)) &&
            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
          __ LoadRR(kScratchReg, i.InputRegister(1));
          __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
        } else {
          ASSEMBLE_BINOP(ShiftRight, ShiftRight);
        }
      } else {
        ASSEMBLE_BINOP(ShiftRight, ShiftRight);
      }
      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftRight64:
      ASSEMBLE_BINOP(srlg, srlg);
      break;
#endif
    case kS390_ShiftRightArith32:
      if (HasRegisterInput(instr, 1)) {
        if (i.OutputRegister().is(i.InputRegister(1)) &&
            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
          __ LoadRR(kScratchReg, i.InputRegister(1));
          __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
                             kScratchReg);
        } else {
          ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
        }
      } else {
        ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
      }
      __ LoadlW(i.OutputRegister(), i.OutputRegister());
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftRightArith64:
      ASSEMBLE_BINOP(srag, srag);
      break;
#endif
#if !V8_TARGET_ARCH_S390X
    case kS390_AddPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
                      i.InputRegister(2));
      __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
                               i.InputRegister(3));
      break;
    case kS390_SubPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
                      i.InputRegister(2));
      __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
                                i.InputRegister(3));
      break;
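    // kS390_MulPair computes a 64-bit product from two 32-bit register pairs
    // without a 32x32->64 multiply: each pair is packed into a 64-bit
    // register (high word shifted up, low word inserted), the packed values
    // are multiplied with msgr, and the result is split back into low and
    // high output words.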
    case kS390_MulPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ sllg(r0, i.InputRegister(1), Operand(32));
      __ sllg(r1, i.InputRegister(3), Operand(32));
      __ lr(r0, i.InputRegister(0));
      __ lr(r1, i.InputRegister(2));
      __ msgr(r1, r0);
      __ lr(i.OutputRegister(0), r1);
      __ srag(i.OutputRegister(1), r1, Operand(32));
      break;
    case kS390_ShiftLeftPair:
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
                         i.InputRegister(0), i.InputRegister(1),
                         i.InputInt32(2));
      } else {
        __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
                         i.InputRegister(0), i.InputRegister(1), kScratchReg,
                         i.InputRegister(2));
      }
      break;
    case kS390_ShiftRightPair:
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
                          i.InputRegister(0), i.InputRegister(1),
                          i.InputInt32(2));
      } else {
        __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
                          i.InputRegister(2));
      }
      break;
    case kS390_ShiftRightArithPair:
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
                               i.InputRegister(0), i.InputRegister(1),
                               i.InputInt32(2));
      } else {
        __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
                               i.InputRegister(0), i.InputRegister(1),
                               kScratchReg, i.InputRegister(2));
      }
      break;
#endif
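    // The rll/rllg instructions only rotate left, so a rotate right by n is
    // emitted as a rotate left by (width - n); for a variable amount the
    // count is negated first, which is equivalent modulo the width.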
    case kS390_RotRight32:
      if (HasRegisterInput(instr, 1)) {
        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
        __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
      } else {
        __ rll(i.OutputRegister(), i.InputRegister(0),
               Operand(32 - i.InputInt32(1)));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_RotRight64:
      if (HasRegisterInput(instr, 1)) {
        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
        __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
      } else {
        __ rllg(i.OutputRegister(), i.InputRegister(0),
                Operand(64 - i.InputInt32(1)));
      }
      break;
#endif
    case kS390_Not:
      __ LoadRR(i.OutputRegister(), i.InputRegister(0));
      __ NotP(i.OutputRegister());
      break;
    case kS390_RotLeftAndMask32:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - i.InputInt32(3);
        int startBit = 63 - i.InputInt32(2);
        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
                 Operand(endBit), Operand::Zero(), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBitLeft = 63 - i.InputInt32(2);
        int clearBitRight = i.InputInt32(3);
        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
        __ srlg(i.OutputRegister(), i.OutputRegister(),
                Operand((clearBitLeft + clearBitRight)));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_RotLeftAndClear64:
      UNIMPLEMENTED();  // Find correct instruction
      break;
    case kS390_RotLeftAndClearLeft64:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63;
        int startBit = 63 - i.InputInt32(2);
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
      }
      break;
    case kS390_RotLeftAndClearRight64:
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - i.InputInt32(2);
        int startBit = 0;
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
      }
      break;
#endif
    case kS390_Add:
#if V8_TARGET_ARCH_S390X
      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
        ASSEMBLE_ADD_WITH_OVERFLOW();
      } else {
#endif
        ASSEMBLE_BINOP(AddP, AddP);
#if V8_TARGET_ARCH_S390X
      }
#endif
      break;
    case kS390_AddWithOverflow32:
      ASSEMBLE_ADD_WITH_OVERFLOW32();
      break;
    case kS390_AddFloat:
      // Ensure we don't clobber right/InputReg(1).
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(aebr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_AddDouble:
      // Ensure we don't clobber right/InputReg(1).
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(adbr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Sub:
#if V8_TARGET_ARCH_S390X
      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
        ASSEMBLE_SUB_WITH_OVERFLOW();
      } else {
#endif
        ASSEMBLE_BINOP(SubP, SubP);
#if V8_TARGET_ARCH_S390X
      }
#endif
      break;
    case kS390_SubWithOverflow32:
      ASSEMBLE_SUB_WITH_OVERFLOW32();
      break;
    case kS390_SubFloat:
      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        }
        __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_SubDouble:
      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        }
        __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Mul32:
#if V8_TARGET_ARCH_S390X
    case kS390_Mul64:
#endif
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kS390_MulHigh32:
      __ LoadRR(r1, i.InputRegister(0));
      __ mr_z(r0, i.InputRegister(1));
      __ LoadW(i.OutputRegister(), r0);
      break;
    case kS390_MulHighU32:
      __ LoadRR(r1, i.InputRegister(0));
      __ mlr(r0, i.InputRegister(1));
      __ LoadlW(i.OutputRegister(), r0);
      break;
    case kS390_MulFloat:
      // Ensure we don't clobber right.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(meebr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_MulDouble:
      // Ensure we don't clobber right.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(mdbr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Div64:
      __ LoadRR(r1, i.InputRegister(0));
      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      break;
#endif
    case kS390_Div32:
      __ LoadRR(r0, i.InputRegister(0));
      __ srda(r0, Operand(32));
      __ dr(r0, i.InputRegister(1));
      __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
                                r1);  // Copy R1: Quotient to output
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_DivU64:
      __ LoadRR(r1, i.InputRegister(0));
      __ LoadImmP(r0, Operand::Zero());
      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      break;
#endif
    case kS390_DivU32:
      __ LoadRR(r0, i.InputRegister(0));
      __ srdl(r0, Operand(32));
      __ dlr(r0, i.InputRegister(1));     // R0:R1: Dividend
      __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      __ LoadAndTestP_ExtendSrc(r1, r1);
      break;

    case kS390_DivFloat:
      // OutputDoubleReg() = i.InputDoubleRegister(0) / i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_DivDouble:
      // OutputDoubleReg() = i.InputDoubleRegister(0) / i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Mod32:
      ASSEMBLE_MODULO(dr, srda);
      break;
    case kS390_ModU32:
      ASSEMBLE_MODULO(dlr, srdl);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Mod64:
      __ LoadRR(r1, i.InputRegister(0));
      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
      break;
    case kS390_ModU64:
      __ LoadRR(r1, i.InputRegister(0));
      __ LoadImmP(r0, Operand::Zero());
      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
      break;
#endif
    case kS390_AbsFloat:
      __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_SqrtFloat:
      ASSEMBLE_FLOAT_UNOP(sqebr);
      break;
    case kS390_FloorFloat:
      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
      break;
    case kS390_CeilFloat:
      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
      break;
    case kS390_TruncateFloat:
      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
      break;
    // Double operations
    case kS390_ModDouble:
      ASSEMBLE_FLOAT_MODULO();
      break;
    case kS390_Neg:
      __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_MaxDouble:
      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
      break;
    case kS390_MinDouble:
      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
      break;
    case kS390_AbsDouble:
      __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_SqrtDouble:
      ASSEMBLE_FLOAT_UNOP(sqdbr);
      break;
    case kS390_FloorDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
      break;
    case kS390_CeilDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
      break;
    case kS390_TruncateDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
      break;
    case kS390_RoundDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
      break;
    case kS390_NegDouble:
      ASSEMBLE_FLOAT_UNOP(lcdbr);
      break;
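    // flogr reports the position of the leftmost one bit in a 64-bit
    // register (i.e. the 64-bit leading-zero count), so the 32-bit
    // count-leading-zeros below first zero-extends the input to 64 bits and
    // then subtracts the 32 leading zeros contributed by the extension.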
    case kS390_Cntlz32: {
      __ llgfr(i.OutputRegister(), i.InputRegister(0));
      __ flogr(r0, i.OutputRegister());
      __ LoadRR(i.OutputRegister(), r0);
      __ SubP(i.OutputRegister(), Operand(32));
    } break;
#if V8_TARGET_ARCH_S390X
    case kS390_Cntlz64: {
      __ flogr(r0, i.InputRegister(0));
      __ LoadRR(i.OutputRegister(), r0);
    } break;
#endif
    case kS390_Popcnt32:
      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Popcnt64:
      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
      break;
#endif
    case kS390_Cmp32:
      ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Cmp64:
      ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
      break;
#endif
    case kS390_CmpFloat:
      __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kS390_CmpDouble:
      __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kS390_Tst32:
      if (HasRegisterInput(instr, 1)) {
        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
      } else {
        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      __ LoadAndTestP_ExtendSrc(r0, r0);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
      } else {
        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      break;
#endif
    case kS390_Push:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ lay(sp, MemOperand(sp, -kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kS390_PushFrame: {
      int num_slots = i.InputInt32(1);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ StoreDouble(i.InputDoubleRegister(0),
                       MemOperand(sp, -num_slots * kPointerSize));
      } else {
        __ StoreP(i.InputRegister(0),
                  MemOperand(sp, -num_slots * kPointerSize));
      }
      __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
      break;
    }
    case kS390_StoreToStackSlot: {
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ StoreDouble(i.InputDoubleRegister(0),
                       MemOperand(sp, slot * kPointerSize));
      } else {
        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
      }
      break;
    }
    case kS390_ExtendSignWord8:
#if V8_TARGET_ARCH_S390X
      __ lgbr(i.OutputRegister(), i.InputRegister(0));
#else
      __ lbr(i.OutputRegister(), i.InputRegister(0));
#endif
      break;
    case kS390_ExtendSignWord16:
#if V8_TARGET_ARCH_S390X
      __ lghr(i.OutputRegister(), i.InputRegister(0));
#else
      __ lhr(i.OutputRegister(), i.InputRegister(0));
#endif
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ExtendSignWord32:
      __ lgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Uint32ToUint64:
      // Zero extend.
      __ llgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Int64ToInt32:
      // Sign extend.
      __ lgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Int64ToFloat32:
      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Int64ToDouble:
      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Uint64ToFloat32:
      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
                                     i.OutputDoubleRegister());
      break;
    case kS390_Uint64ToDouble:
      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
                                      i.OutputDoubleRegister());
      break;
#endif
    case kS390_Int32ToFloat32:
      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Int32ToDouble:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Uint32ToFloat32:
      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
                                   i.OutputDoubleRegister());
      break;
    case kS390_Uint32ToDouble:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      break;
    case kS390_DoubleToInt32:
    case kS390_DoubleToUint32:
    case kS390_DoubleToInt64: {
#if V8_TARGET_ARCH_S390X
      bool check_conversion =
          (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
#endif
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
                              kScratchReg,
#endif
                              i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
#endif
      break;
    }
    case kS390_Float32ToInt32: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
                               kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
    case kS390_Float32ToUint32: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
                                       i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_Float32ToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
                                       i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#endif
    case kS390_Float32ToInt64: {
#if V8_TARGET_ARCH_S390X
      bool check_conversion =
          (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
#endif
      __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
                               kScratchReg,
#endif
                               i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
#endif
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_DoubleToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
                                      i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#endif
    case kS390_DoubleToFloat32:
      __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_Float32ToDouble:
      __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_DoubleExtractLowWord32:
      // TODO(john.yan): this can cause problems when interrupting;
      // use an freg->greg instruction.
      __ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
      __ LoadlW(i.OutputRegister(),
                MemOperand(sp, -kDoubleSize + Register::kMantissaOffset));
      break;
    case kS390_DoubleExtractHighWord32:
      // TODO(john.yan): this can cause problems when interrupting;
      // use an freg->greg instruction.
      __ stdy(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
      __ LoadlW(i.OutputRegister(),
                MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
      break;
    case kS390_DoubleInsertLowWord32:
      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kS390_DoubleInsertHighWord32:
      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
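    // DoubleConstruct packs two 32-bit GPRs into one double by storing both
    // words just below the stack pointer and reloading them as a double; the
    // endian-dependent #if below chooses which input lands in the low word.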
    case kS390_DoubleConstruct:
      // TODO(john.yan): this can cause problems when interrupting;
      // use a greg->freg instruction.
#if V8_TARGET_LITTLE_ENDIAN
      __ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize / 2));
      __ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize));
#else
      __ StoreW(i.InputRegister(1), MemOperand(sp, -kDoubleSize / 2));
      __ StoreW(i.InputRegister(0), MemOperand(sp, -kDoubleSize));
#endif
      __ ldy(i.OutputDoubleRegister(), MemOperand(sp, -kDoubleSize));
      break;
    case kS390_LoadWordS8:
      ASSEMBLE_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
      __ lgbr(i.OutputRegister(), i.OutputRegister());
#else
      __ lbr(i.OutputRegister(), i.OutputRegister());
#endif
      break;
    case kS390_BitcastFloat32ToInt32:
      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_BitcastInt32ToFloat32:
      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_BitcastDoubleToInt64:
      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_BitcastInt64ToDouble:
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#endif
    case kS390_LoadWordU8:
      ASSEMBLE_LOAD_INTEGER(LoadlB);
      break;
    case kS390_LoadWordU16:
      ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
      break;
    case kS390_LoadWordS16:
      ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
      break;
    case kS390_LoadWordS32:
      ASSEMBLE_LOAD_INTEGER(LoadW);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_LoadWord64:
      ASSEMBLE_LOAD_INTEGER(lg);
      break;
#endif
    case kS390_LoadFloat32:
      ASSEMBLE_LOAD_FLOAT(LoadFloat32);
      break;
    case kS390_LoadDouble:
      ASSEMBLE_LOAD_FLOAT(LoadDouble);
      break;
    case kS390_StoreWord8:
      ASSEMBLE_STORE_INTEGER(StoreByte);
      break;
    case kS390_StoreWord16:
      ASSEMBLE_STORE_INTEGER(StoreHalfWord);
      break;
    case kS390_StoreWord32:
      ASSEMBLE_STORE_INTEGER(StoreW);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_StoreWord64:
      ASSEMBLE_STORE_INTEGER(StoreP);
      break;
#endif
    case kS390_StoreFloat32:
      ASSEMBLE_STORE_FLOAT32();
      break;
    case kS390_StoreDouble:
      ASSEMBLE_STORE_DOUBLE();
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
#if V8_TARGET_ARCH_S390X
      __ lgbr(i.OutputRegister(), i.OutputRegister());
#else
      __ lbr(i.OutputRegister(), i.OutputRegister());
#endif
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
      break;
    case kCheckedLoadWord64:
#if V8_TARGET_ARCH_S390X
      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
#else
      UNREACHABLE();
#endif
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
      break;
    case kCheckedStoreWord64:
#if V8_TARGET_ARCH_S390X
      ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
#else
      UNREACHABLE();
#endif
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT32();
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_DOUBLE();
      break;
    default:
      UNREACHABLE();
      break;
  }
}  // NOLINT(readability/fn_size)

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  S390OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kS390_CmpDouble) {
    // Check for unordered if necessary: branch to flabel/tlabel according to
    // what the tests expect.
    if (cond == le || cond == eq || cond == lt) {
      __ bunordered(flabel);
    } else if (cond == gt || cond == ne || cond == ge) {
      __ bunordered(tlabel);
    }
  }
  __ b(cond, tlabel);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  S390OperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);

  // Overflow checked for add/sub only.
  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
         (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
         (op == kS390_Add || op == kS390_Sub));

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cond = FlagsConditionToCondition(condition, op);
  switch (cond) {
    case ne:
    case ge:
    case gt:
      if (check_unordered) {
        __ LoadImmP(reg, Operand(1));
        __ LoadImmP(kScratchReg, Operand::Zero());
        __ bunordered(&done);
        Label cond_true;
        __ b(cond, &cond_true, Label::kNear);
        __ LoadRR(reg, kScratchReg);
        __ bind(&cond_true);
      } else {
1716 Label cond_true, done_here;
1717 __ LoadImmP(reg, Operand(1));
1718 __ b(cond, &cond_true, Label::kNear);
1719 __ LoadImmP(reg, Operand::Zero());
1720 __ bind(&cond_true);
1721 }
1722 break;
    case eq:
    case lt:
    case le:
      if (check_unordered) {
        __ LoadImmP(reg, Operand::Zero());
        __ LoadImmP(kScratchReg, Operand(1));
        __ bunordered(&done);
        Label cond_false;
        __ b(NegateCondition(cond), &cond_false, Label::kNear);
        __ LoadRR(reg, kScratchReg);
        __ bind(&cond_false);
      } else {
        __ LoadImmP(reg, Operand::Zero());
        Label cond_false;
        __ b(NegateCondition(cond), &cond_false, Label::kNear);
        __ LoadImmP(reg, Operand(1));
        __ bind(&cond_false);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&done);
}

void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
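  // Inputs are laid out as: value, default target, then (case value, target)
  // pairs. Compare the input against each case value in turn and fall back to
  // the default target when nothing matches.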
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ CmpP(input, Operand(i.InputInt32(index + 0)));
    __ beq(GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ CmpLogicalP(input, Operand(case_count));
  __ bge(GetLabel(i.InputRpo(1)));
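  // In range: materialize the table address, index it by input * pointer size,
  // and jump through the loaded label address. The unsigned compare above also
  // sends negative inputs to the default target.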
  __ larl(kScratchReg, table);
  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
  __ Jump(kScratchReg);
}

void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  // TODO(turbofan): We should be able to generate better code by sharing the
  // actual final call site and just bl'ing to it here, similar to what we do
  // in the lithium backend.
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
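      // Per the S390 ABI, r14 holds the return address; push it together with
      // the caller's frame pointer and establish the new frame.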
      __ Push(r14, fp);
      __ LoadRR(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
    }
  }

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
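  // Allocate the spill slot area by lowering sp. lay (rather than la) is used
  // here since it accepts a signed 20-bit displacement.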
  if (stack_shrink_slots > 0) {
    __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
    frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
                                              (kDoubleSize / kPointerSize));
  }

  // Save callee-saved registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPush(saves);
    // Register save area does not include the fp or constant pool pointer.
    const int num_saves =
        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
    DCHECK(num_saves == base::bits::CountPopulation32(saves));
    frame()->AllocateSavedCalleeRegisterSlots(num_saves);
  }
}

void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());
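  // pop_count stack parameter slots are dropped by the Ret() at the end, after
  // the callee-saved registers have been restored.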

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }

  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ b(&return_label_);
      return;
    } else {
      __ bind(&return_label_);
      AssembleDeconstructFrame();
    }
  }
  __ Ret(pop_count);
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src);
    } else {
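      // Stack-to-stack move: stage the value through the scratch register.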
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ mov(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          int slot;
          if (IsMaterializableFromFrame(src_object, &slot)) {
            __ LoadP(dst, g.SlotToMemOperand(slot));
          } else if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
          break;
      }
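      // A constant destined for a stack slot was materialized into kScratchReg
      // above; spill it to the slot now.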
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      if (src.type() == Constant::kFloat32) {
        __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
      } else {
        __ LoadDoubleLiteral(dst, src.ToFloat64(), kScratchReg);
      }

      if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ StoreDouble(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ LoadDouble(g.ToDoubleRegister(destination), src);
    } else {
      DoubleRegister temp = kScratchDoubleReg;
      __ LoadDouble(temp, src);
      __ StoreDouble(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ LoadRR(temp, src);
      __ LoadRR(src, dst);
      __ LoadRR(dst, temp);
    } else {
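      // Register-stack swap: cycle the value through the scratch register.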
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ LoadRR(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_S390X
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
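    // Stack-to-stack swap: exchange the two slots through a pair of register
    // temporaries.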
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ ldr(temp, src);
      __ ldr(src, dst);
      __ ldr(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ ldr(temp, src);
      __ LoadDouble(src, dst);
      __ StoreDouble(temp, dst);
    }
#if !V8_TARGET_ARCH_S390X
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    // TODO(joransiu): MVC opportunity
    __ LoadDouble(temp_0, src);
    __ LoadDouble(temp_1, dst);
    __ StoreDouble(temp_0, dst);
    __ StoreDouble(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
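  // Emit one absolute label address per entry; AssembleArchTableSwitch indexes
  // into this block and jumps through the loaded address.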
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}

void CodeGenerator::AddNopForSmiCodeInlining() {
  // We do not insert nops for inlined Smi code.
}

void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % 2);
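    // The nop emitted here is 2 bytes, hence the even-padding DCHECK above and
    // the step of 2 below.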
    while (padding_size > 0) {
      __ nop();
      padding_size -= 2;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8