// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/mips/macro-assembler-mips.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                                       \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)


// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter FINAL : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(int index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(int index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The single (float) and double register namespaces are the same on MIPS;
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(int index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadSingle FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of double input already in kScratchReg.
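    // (Reading aid, not part of the original comments.) The two shifts below
    // clear everything but the input's sign bit from the high word; writing
    // that word back with mthc1 turns the zero result into a correctly
    // signed zero, e.g. trunc(-0.5) must yield -0.0 rather than +0.0.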
    __ dsrl(at, kScratchReg, 31);
    __ dsll(at, at, 31);
    __ mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineTruncate FINAL : public OutOfLineRound {
 public:
  OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineRound(gen, result) {}
};


class OutOfLineFloor FINAL : public OutOfLineRound {
 public:
  OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineRound(gen, result) {}
};


class OutOfLineCeil FINAL : public OutOfLineRound {
 public:
  OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineRound(gen, result) {}
};


}  // namespace


#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
  do {                                                                        \
    auto result = i.Output##width##Register();                                \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(2), offset);                               \
      __ asm_instr(result, MemOperand(at, 0));                                \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)
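
// (Reading aid, not part of the original source.) The checked load/store
// macros above and below all follow the same shape: an unsigned bounds check
// branches out when the offset lies outside the buffer, roughly
//   Branch-if offset >= length -> out-of-line   (delay slot: at = base+offset)
//   load/store via MemOperand(at, 0) or MemOperand(base, imm)
// For loads, the out-of-line path materializes NaN (floats) or zero
// (integers) and jumps back to ool->exit(); the store macros simply skip the
// store by branching to a local `done` label.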


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
  do {                                                                        \
    auto result = i.OutputRegister();                                         \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
    if (instr->InputAt(0)->IsRegister()) {                                    \
      auto offset = i.InputRegister(0);                                       \
      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(2), offset);                               \
      __ asm_instr(result, MemOperand(at, 0));                                \
    } else {                                                                  \
      auto offset = i.InputOperand(0).immediate();                            \
      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
    }                                                                         \
    __ bind(ool->exit());                                                     \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(3), offset);                        \
      __ asm_instr(value, MemOperand(at, 0));                          \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.Input##width##Register(2);                        \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
  do {                                                                 \
    Label done;                                                        \
    if (instr->InputAt(0)->IsRegister()) {                             \
      auto offset = i.InputRegister(0);                                \
      auto value = i.InputRegister(2);                                 \
      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
      __ Daddu(at, i.InputRegister(3), offset);                        \
      __ asm_instr(value, MemOperand(at, 0));                          \
    } else {                                                           \
      auto offset = i.InputOperand(0).immediate();                     \
      auto value = i.InputRegister(2);                                 \
      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)


#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
  do {                                                                         \
    auto ool =                                                                 \
        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
    Label done;                                                                \
    __ mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
    __ dmfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  } while (0)
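
// (Reading aid, not part of the original source.) The exponent test above
// checks whether the biased exponent is at least kExponentBias +
// kMantissaBits (1023 + 52 for an IEEE double). Such values are already
// integral (or Inf/NaN), so the input is returned unchanged via the mov_d in
// the branch delay slot. Otherwise the value is rounded to a 64-bit integer
// and converted back; a zero integer result is routed through the
// OutOfLineRound handlers to restore the sign of a negative zero.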


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();

  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
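        // (Reading aid.) The code-object pointer is tagged, so adding
        // Code::kHeaderSize - kHeapObjectTag skips the header and clears the
        // tag in a single daddiu, yielding the first instruction's address.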
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }

      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Dext:
      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
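        // (Reading aid.) MIPS shift instructions encode a 5-bit shift amount,
        // so shifts of 32..63 use the dsll32/dsrl32/dsra32 variants with
        // 'imm - 32'; the same pattern appears in Dshr and Dsar below.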
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int32_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
    case kMips64Tst32:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
    case kMips64Cmp32:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate
      // instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64Float64Floor: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
      break;
    }
    case kMips64Float64Ceil: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
      break;
    }
    case kMips64Float64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
      break;
    }
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64CvtSD: {
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64CvtDS: {
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      FPURegister scratch = kScratchDoubleReg;
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    // ... more basic instructions ...

    case kMips64Lbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sb:
      __ sb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh:
      __ sh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw:
      __ sw(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Sd:
      __ sd(i.InputRegister(2), i.MemoryOperand());
      break;
    case kMips64Lwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMips64Swc1: {
      int index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      __ swc1(i.InputSingleRegister(index), operand);
      break;
    }
    case kMips64Ldc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Sdc1:
      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kMips64Push:
      __ Push(i.InputRegister(0));
      break;
    case kMips64StackClaim: {
      int words = MiscField::decode(instr->opcode());
      __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
      break;
    }
    case kMips64StoreToStackSlot: {
      int slot = MiscField::decode(instr->opcode());
      __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
      break;
    }
    case kMips64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
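      // (Reading aid.) The effective address is computed into 'index',
      // clobbering it, so the same register can then be passed to
      // RecordWrite below as the address operand.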
      __ daddu(index, object, index);
      __ sd(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      RAStatus ra_status = kRAHasNotBeenSaved;
      __ RecordWrite(object, index, value, ra_status, mode);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
  }
}


#define UNSUPPORTED_COND(opcode, condition)                                  \
  OFStream out(stdout);                                                      \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  MipsOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  Condition cc = kNoCondition;

  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers to the compare pseudo-op are not modified before this
  // branch op, as they are tested here.
  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
  // not separated by other instructions.
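  // For example (a reading aid, not original text): a kMips64Cmp
  // pseudo-instruction with condition kSignedLessThan reaches this point with
  // its operands intact and is emitted here as one fused compare-and-branch,
  // roughly:
  //   __ Branch(tlabel, lt, i.InputRegister(0), i.InputOperand(1));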

  if (instr->arch_opcode() == kMips64Tst) {
    switch (branch->condition) {
      case kNotEqual:
        cc = ne;
        break;
      case kEqual:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMips64Tst, branch->condition);
        break;
    }
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64Tst32) {
    switch (branch->condition) {
      case kNotEqual:
        cc = ne;
        break;
      case kEqual:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMips64Tst32, branch->condition);
        break;
    }
    // Zero-extend the registers: on MIPS64, only a 64-bit operand
    // branch-and-compare op is available.
    // This is a disadvantage of performing 32-bit operations on MIPS64.
    // Try to force the front-end to globally prefer the Word64 representation
    // for MIPS64, even for Word32.
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Dext(at, at, 0, 32);
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    switch (branch->condition) {
      case kOverflow:
        cc = ne;
        break;
      case kNotOverflow:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMips64Dadd, branch->condition);
        break;
    }

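    // (Reading aid.) Overflow check: kScratchReg receives bits 63..32 of the
    // 64-bit result, 'at' receives bit 31 replicated; the two differ exactly
    // when the result does not fit in 32 bits, i.e. when the 32-bit
    // operation overflowed.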
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(tlabel, cc, at, Operand(kScratchReg));
  } else if (instr->arch_opcode() == kMips64Cmp) {
    switch (branch->condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        break;
      case kSignedLessThan:
        cc = lt;
        break;
      case kSignedGreaterThanOrEqual:
        cc = ge;
        break;
      case kSignedLessThanOrEqual:
        cc = le;
        break;
      case kSignedGreaterThan:
        cc = gt;
        break;
      case kUnsignedLessThan:
        cc = lo;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = hs;
        break;
      case kUnsignedLessThanOrEqual:
        cc = ls;
        break;
      case kUnsignedGreaterThan:
        cc = hi;
        break;
      default:
        UNSUPPORTED_COND(kMips64Cmp, branch->condition);
        break;
    }
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.

  } else if (instr->arch_opcode() == kMips64Cmp32) {
    switch (branch->condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        break;
      case kSignedLessThan:
        cc = lt;
        break;
      case kSignedGreaterThanOrEqual:
        cc = ge;
        break;
      case kSignedLessThanOrEqual:
        cc = le;
        break;
      case kSignedGreaterThan:
        cc = gt;
        break;
      case kUnsignedLessThan:
        cc = lo;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = hs;
        break;
      case kUnsignedLessThanOrEqual:
        cc = ls;
        break;
      case kUnsignedGreaterThan:
        cc = hi;
        break;
      default:
        UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
        break;
    }

    switch (branch->condition) {
      case kEqual:
      case kNotEqual:
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kSignedLessThanOrEqual:
      case kSignedGreaterThan:
        // Sign-extend the registers: on MIPS64, only a 64-bit operand
        // branch-and-compare op is available.
        __ sll(i.InputRegister(0), i.InputRegister(0), 0);
        if (instr->InputAt(1)->IsRegister()) {
          __ sll(i.InputRegister(1), i.InputRegister(1), 0);
        }
        break;
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        // Zero-extend the registers: on MIPS64, only a 64-bit operand
        // branch-and-compare op is available.
        __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
        if (instr->InputAt(1)->IsRegister()) {
          __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
        }
        break;
      default:
        UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
        break;
    }
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
  } else if (instr->arch_opcode() == kMips64CmpD) {
    // TODO(dusmil): Optimize unordered checks to use fewer instructions,
    // even if we have to unfold the BranchF macro.
    Label* nan = flabel;
    switch (branch->condition) {
      case kUnorderedEqual:
        cc = eq;
        break;
      case kUnorderedNotEqual:
        cc = ne;
        nan = tlabel;
        break;
      case kUnorderedLessThan:
        cc = lt;
        break;
      case kUnorderedGreaterThanOrEqual:
        cc = ge;
        nan = tlabel;
        break;
      case kUnorderedLessThanOrEqual:
        cc = le;
        break;
      case kUnorderedGreaterThan:
        cc = gt;
        nan = tlabel;
        break;
      default:
        UNSUPPORTED_COND(kMips64CmpD, branch->condition);
        break;
    }
    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));

    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;

  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  // For materializations, we use the delay slot to set the result true, and
  // in the false case, where we fall through the branch, we set the result
  // to false.
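  // For example (a reading aid, assumed): for kMips64Cmp with kEqual this
  // materializes roughly
  //   beq  left, right, &done
  //   li   result, 1            // delay slot: executes on both paths
  //   li   result, 0            // fall-through overwrites with false
  //  done: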

  if (instr->arch_opcode() == kMips64Tst) {
    switch (condition) {
      case kNotEqual:
        cc = ne;
        break;
      case kEqual:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMips64Tst, condition);
        break;
    }
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64Tst32) {
    switch (condition) {
      case kNotEqual:
        cc = ne;
        break;
      case kEqual:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMips64Tst32, condition);
        break;
    }
    // Zero-extend the register: on MIPS64, only a 64-bit operand
    // branch-and-compare op is available.
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Dext(at, at, 0, 32);
    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    switch (condition) {
      case kOverflow:
        cc = ne;
        break;
      case kNotOverflow:
        cc = eq;
        break;
      default:
        UNSUPPORTED_COND(kMips64Dadd, condition);
        break;
    }
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64Cmp) {
    Register left = i.InputRegister(0);
    Operand right = i.InputOperand(1);
    switch (condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        break;
      case kSignedLessThan:
        cc = lt;
        break;
      case kSignedGreaterThanOrEqual:
        cc = ge;
        break;
      case kSignedLessThanOrEqual:
        cc = le;
        break;
      case kSignedGreaterThan:
        cc = gt;
        break;
      case kUnsignedLessThan:
        cc = lo;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = hs;
        break;
      case kUnsignedLessThanOrEqual:
        cc = ls;
        break;
      case kUnsignedGreaterThan:
        cc = hi;
        break;
      default:
        UNSUPPORTED_COND(kMips64Cmp, condition);
        break;
    }
    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64Cmp32) {
    Register left = i.InputRegister(0);
    Operand right = i.InputOperand(1);
    switch (condition) {
      case kEqual:
        cc = eq;
        break;
      case kNotEqual:
        cc = ne;
        break;
      case kSignedLessThan:
        cc = lt;
        break;
      case kSignedGreaterThanOrEqual:
        cc = ge;
        break;
      case kSignedLessThanOrEqual:
        cc = le;
        break;
      case kSignedGreaterThan:
        cc = gt;
        break;
      case kUnsignedLessThan:
        cc = lo;
        break;
      case kUnsignedGreaterThanOrEqual:
        cc = hs;
        break;
      case kUnsignedLessThanOrEqual:
        cc = ls;
        break;
      case kUnsignedGreaterThan:
        cc = hi;
        break;
      default:
        UNSUPPORTED_COND(kMips64Cmp32, condition);
        break;
    }

    switch (condition) {
      case kEqual:
      case kNotEqual:
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kSignedLessThanOrEqual:
      case kSignedGreaterThan:
        // Sign-extend the registers: on MIPS64, only a 64-bit operand
        // branch-and-compare op is available.
        __ sll(left, left, 0);
        if (instr->InputAt(1)->IsRegister()) {
          __ sll(i.InputRegister(1), i.InputRegister(1), 0);
        }
        break;
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        // Zero-extend the registers: on MIPS64, only a 64-bit operand
        // branch-and-compare op is available.
        __ Dext(left, left, 0, 32);
        if (instr->InputAt(1)->IsRegister()) {
          __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
        }
        break;
      default:
        UNSUPPORTED_COND(kMips64Cmp32, condition);
        break;
    }
    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
    __ li(result, Operand(1));  // In delay slot.
  } else if (instr->arch_opcode() == kMips64CmpD) {
    FPURegister left = i.InputDoubleRegister(0);
    FPURegister right = i.InputDoubleRegister(1);
    // TODO(plind): Provide a NaN-testing macro-asm function without the need
    // for BranchF.
    FPURegister dummy1 = f0;
    FPURegister dummy2 = f2;
    switch (condition) {
      case kUnorderedEqual:
        // TODO(plind): improve the NaN testing throughout this function.
        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
        cc = eq;
        break;
      case kUnorderedNotEqual:
        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
        cc = ne;
        break;
      case kUnorderedLessThan:
        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
        cc = lt;
        break;
      case kUnorderedGreaterThanOrEqual:
        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
        cc = ge;
        break;
      case kUnorderedLessThanOrEqual:
        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
        cc = le;
        break;
      case kUnorderedGreaterThan:
        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
        cc = gt;
        break;
      default:
        UNSUPPORTED_COND(kMips64CmpD, condition);
        break;
    }
    __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
    __ li(result, Operand(1));  // In delay slot - branch taken returns 1.
    // Fall-through (branch not taken) returns 0.

  } else {
    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
  // Fallthru case is the false materialization.
  __ bind(&false_value);
  __ li(result, Operand(static_cast<int64_t>(0)));
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ Push(ra, fp);
    __ mov(fp, sp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      // TODO(plind): make callee save size const, possibly DCHECK it.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ MultiPush(saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ Branch(&ok, ne, a2, Operand(at));

      __ ld(a2, GlobalObjectOperand());
      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
      __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ MultiPop(saves);
      }
    }
    __ mov(sp, fp);
    __ Pop(ra, fp);
    __ Ret();
  } else {
    __ mov(sp, fp);
    __ Pop(ra, fp);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ DropAndRet(pop_count);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sd(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ld(temp, src);
      __ sd(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ li(dst, Operand(src.ToInt32()));
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          __ li(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ li(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
          break;
      }
      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ sw(at, dst);
      } else {
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ld(src, dst);
      __ sd(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ld(temp_0, src);
    __ ld(temp_1, dst);
    __ sd(temp_0, dst);
    __ sd(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // Unused on 32-bit ARM. Still exists on 64-bit ARM.
  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block trampoline pool emission for the duration of the padding.
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8