// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kCompareReg kLithiumScratchReg2
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                       \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)


// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter FINAL : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(int index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(int index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The Single (float) and Double register namespaces are the same on MIPS;
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    Maybe this is not done on ARM due to its constant pool??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(int index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadSingle FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    // Handle the rounding-to-zero case, where the sign has to be preserved.
    // The high bits of the double input are already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ Mthc1(at, result_);
179
180 private:
181 DoubleRegister const result_;
182};
183
184
185class OutOfLineTruncate FINAL : public OutOfLineRound {
186 public:
187 OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
188 : OutOfLineRound(gen, result) {}
189};
190
191
192class OutOfLineFloor FINAL : public OutOfLineRound {
193 public:
194 OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
195 : OutOfLineRound(gen, result) {}
196};
197
198
199class OutOfLineCeil FINAL : public OutOfLineRound {
200 public:
201 OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
202 : OutOfLineRound(gen, result) {}
203};
204
205} // namespace
206
207
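// Checked loads guard the memory access with an unsigned bounds check: if the
// offset is not below the length (condition 'hs', or 'ls' with the operands
// swapped in the immediate case), control branches to out-of-line code that
// materializes the default value (NaN for floats, zero for integers) instead
// of loading. In the register-offset case the address computation sits in the
// branch delay slot.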
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
  do {                                                                        \
    auto result = i.Output##width##Register();                                \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ addu(at, i.InputRegister(2), offset);                                \
      __ asm_instr(result, MemOperand(at, 0));                                \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ addu(at, i.InputRegister(2), offset);                                \
      __ asm_instr(result, MemOperand(at, 0));                                \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


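// Checked stores use the same bounds-check pattern as checked loads, but an
// out-of-bounds index simply skips the store; no out-of-line code is needed.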
#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ addu(at, i.InputRegister(3), offset);                         \
      __ asm_instr(value, MemOperand(at, 0));                          \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputRegister(2);                                 \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ addu(at, i.InputRegister(3), offset);                         \
      __ asm_instr(value, MemOperand(at, 0));                          \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.InputRegister(2);                                 \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


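// Rounds a double to a double (floor, ceil, trunc). If the biased exponent
// says the magnitude is at least 2^52, the value is already integral (or
// NaN/infinity) and is returned unchanged via the branch delay slot.
// Otherwise the value is rounded to a 64-bit integer and converted back; a
// zero integer result may be a negative value that rounded to zero, so
// out-of-line code restores the sign bit from the original high word saved
// in kScratchReg.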
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
  do {                                                                         \
    auto ool =                                                                 \
        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
    Label done;                                                                \
    __ Mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
    __ Move(at, kScratchReg2, i.OutputDoubleRegister());                       \
    __ or_(at, at, kScratchReg2);                                              \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();

  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }

      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMipsAdd:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAddOvf:
      __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
                                 i.InputOperand(1), kCompareReg, kScratchReg);
      break;
    case kMipsSub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsSubOvf:
      __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
                                 i.InputOperand(1), kCompareReg, kScratchReg);
      break;
    case kMipsMul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsDiv:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsDivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsMod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsAnd:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsOr:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsXor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsShl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsShr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsSar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMipsRor:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMipsTst:
      // Pseudo-instruction used for tst/branch. No opcode emitted here.
      break;
    case kMipsCmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMipsMov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMipsCmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMipsAddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsSubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsMulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsDivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMipsModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMipsFloat64Floor: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
      break;
    }
    case kMipsFloat64Ceil: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
      break;
    }
    case kMipsFloat64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
      break;
    }
    case kMipsSqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsCvtSD: {
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMipsCvtDS: {
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMipsCvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMipsCvtDUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      break;
    }
    case kMipsTruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other architectures use round-to-zero here, so we do the same.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMipsTruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    // ... more basic instructions ...

    case kMipsLbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsLb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSb:
      __ sb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMipsLhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsLh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSh:
      __ sh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMipsLw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMipsSw:
      __ sw(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMipsLwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMipsSwc1: {
      int index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      __ swc1(i.InputSingleRegister(index), operand);
      break;
    }
    case kMipsLdc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMipsSdc1:
      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kMipsPush:
      __ Push(i.InputRegister(0));
      break;
    case kMipsStackClaim: {
      int words = MiscField::decode(instr->opcode());
      __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
      break;
    }
    case kMipsStoreToStackSlot: {
      int slot = MiscField::decode(instr->opcode());
      __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
      break;
    }
    case kMipsStoreWriteBarrier: {
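      // Compute the address 'object + index', store 'value' there, and then
      // record the write so the GC's write barrier sees it.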
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ addu(index, object, index);
      __ sw(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      RAStatus ra_status = kRAHasNotBeenSaved;
      __ RecordWrite(object, index, value, ra_status, mode);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
  }
}


#define UNSUPPORTED_COND(opcode, condition)                                   \
  do {                                                                        \
    OFStream out(stdout);                                                     \
    out << "Unsupported " << #opcode << " condition: \"" << condition         \
        << "\"";                                                              \
    UNIMPLEMENTED();                                                          \
  } while (0)

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  MipsOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = kNoCondition;

  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers to the compare pseudo-op are not modified before this
  // branch op, as they are tested here.
  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
  //    not separated by other instructions.

  if (instr->arch_opcode() == kMipsTst) {
    switch (branch->condition) {
      case kNotEqual:
        cc = ne;
        break;
      case kEqual:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMipsTst, branch->condition);
        break;
    }
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));

  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf) {
    // kMipsAddOvf and kMipsSubOvf emit a negative result to kCompareReg on
    // overflow.
    switch (branch->condition) {
      case kOverflow:
        cc = lt;
        break;
      case kNotOverflow:
        cc = ge;
        break;
      default:
        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
        break;
    }
    __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));

  } else if (instr->arch_opcode() == kMipsCmp) {
    switch (branch->condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        break;
      case kSignedLessThan:
        cc = lt;
        break;
      case kSignedGreaterThanOrEqual:
        cc = ge;
        break;
      case kSignedLessThanOrEqual:
        cc = le;
        break;
      case kSignedGreaterThan:
        cc = gt;
        break;
      case kUnsignedLessThan:
        cc = lo;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = hs;
        break;
      case kUnsignedLessThanOrEqual:
        cc = ls;
        break;
      case kUnsignedGreaterThan:
        cc = hi;
        break;
      default:
        UNSUPPORTED_COND(kMipsCmp, branch->condition);
        break;
    }
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.

  } else if (instr->arch_opcode() == kMipsCmpD) {
    // TODO(dusmil): Optimize unordered checks to use fewer instructions, even
    //    if we have to unfold the BranchF macro.
    Label* nan = flabel;
    switch (branch->condition) {
      case kUnorderedEqual:
        cc = eq;
        break;
      case kUnorderedNotEqual:
        cc = ne;
        nan = tlabel;
        break;
      case kUnorderedLessThan:
        cc = lt;
        break;
      case kUnorderedGreaterThanOrEqual:
        cc = ge;
        nan = tlabel;
        break;
      case kUnorderedLessThanOrEqual:
        cc = le;
        break;
      case kUnorderedGreaterThan:
        cc = gt;
        nan = tlabel;
        break;
      default:
        UNSUPPORTED_COND(kMipsCmpD, branch->condition);
        break;
    }
    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.

  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;

  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  // For materializations, we use the delay slot to set the result to true;
  // in the false case, where we fall through the branch, we reset the result
  // to false.

  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
  //    not separated by other instructions.
  if (instr->arch_opcode() == kMipsTst) {
    switch (condition) {
      case kNotEqual:
        cc = ne;
        break;
      case kEqual:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMipsTst, condition);
        break;
    }
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
    __ li(result, Operand(1));  // In delay slot.

  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf) {
    // kMipsAddOvf and kMipsSubOvf emit a negative result to kCompareReg on
    // overflow.
    switch (condition) {
      case kOverflow:
        cc = lt;
        break;
      case kNotOverflow:
        cc = ge;
        break;
      default:
        UNSUPPORTED_COND(kMipsAddOvf, condition);
        break;
    }
    __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
    __ li(result, Operand(1));  // In delay slot.

  } else if (instr->arch_opcode() == kMipsCmp) {
    Register left = i.InputRegister(0);
    Operand right = i.InputOperand(1);
    switch (condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        break;
      case kSignedLessThan:
        cc = lt;
        break;
      case kSignedGreaterThanOrEqual:
        cc = ge;
        break;
      case kSignedLessThanOrEqual:
        cc = le;
        break;
      case kSignedGreaterThan:
        cc = gt;
        break;
      case kUnsignedLessThan:
        cc = lo;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = hs;
        break;
      case kUnsignedLessThanOrEqual:
        cc = ls;
        break;
      case kUnsignedGreaterThan:
        cc = hi;
        break;
      default:
        UNSUPPORTED_COND(kMipsCmp, condition);
        break;
    }
    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
    __ li(result, Operand(1));  // In delay slot.

  } else if (instr->arch_opcode() == kMipsCmpD) {
    FPURegister left = i.InputDoubleRegister(0);
    FPURegister right = i.InputDoubleRegister(1);
    // TODO(plind): Provide NaN-testing macro-asm function without need for
    //    BranchF.
    FPURegister dummy1 = f0;
    FPURegister dummy2 = f2;
    switch (condition) {
      case kUnorderedEqual:
        // TODO(plind): improve the NaN testing throughout this function.
        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
        cc = eq;
        break;
      case kUnorderedNotEqual:
        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
        cc = ne;
        break;
      case kUnorderedLessThan:
        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
        cc = lt;
        break;
      case kUnorderedGreaterThanOrEqual:
        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
        cc = ge;
        break;
      case kUnorderedLessThanOrEqual:
        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
        cc = le;
        break;
      case kUnorderedGreaterThan:
        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
        cc = gt;
        break;
      default:
        UNSUPPORTED_COND(kMipsCmpD, condition);
        break;
    }
    __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
    __ li(result, Operand(1));  // In delay slot - branch taken returns 1.
                                // Fall-thru (branch not taken) returns 0.

  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
  // Fallthru case is the false materialization.
  __ bind(&false_value);
  __ li(result, Operand(0));
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ Push(ra, fp);
    __ mov(fp, sp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      // TODO(plind): make callee save size const, possibly DCHECK it.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ MultiPush(saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ MultiPop(saves);
      }
    }
    __ mov(sp, fp);
    __ Pop(ra, fp);
    __ Ret();
  } else {
    __ mov(sp, fp);
    __ Pop(ra, fp);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ DropAndRet(pop_count);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sw(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ lw(temp, src);
      __ sw(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32()));
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ li(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
          break;
      }
      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ sw(at, dst);
      } else {
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ lw(src, dst);
      __ sw(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kCompareReg;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lw(temp_0, src);
    __ lw(temp_1, dst);
    __ sw(temp_0, dst);
    __ sw(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
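    // Swap two double stack slots using one FPU scratch register (to hold one
    // full 64-bit slot) and one GP scratch register (to copy the other slot
    // word by word).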
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit ARM; still exists on 64-bit ARM (comment inherited from
  // the ARM port).
  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
}


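// Ensures there is enough room after the previous lazy-bailout point for the
// deoptimizer to patch in a call, padding with nops if needed.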
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block trampoline pool emission for the duration of the padding.
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8