blob: 5a8dd2cd37a380611ab4732ded855bd8bce30ab1 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// The original source code covered by the above license above has been
32// modified significantly by Google Inc.
33// Copyright 2012 the V8 project authors. All rights reserved.
34
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000035#include "src/mips64/assembler-mips64.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000036
37#if V8_TARGET_ARCH_MIPS64
38
39#include "src/base/cpu.h"
40#include "src/mips64/assembler-mips64-inl.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000041
42namespace v8 {
43namespace internal {
44
45
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;  // Bit set indexed by CpuFeature enum values.
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}
65
66
// Populate the supported_ feature bit set. For cross compiles (snapshot
// generation) only compile-time-determined features are used; otherwise the
// host CPU is probed at runtime (the simulator build assumes FPU).
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}


// Target/feature printing is not implemented for MIPS64.
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }
88
89
// Returns the architectural register number for |reg|. Note that the table
// below is the identity mapping for all 32 MIPS general registers; it is kept
// as an explicit table so the symbolic names stay visible.
int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // a4
    9,    // a5
    10,   // a6
    11,   // a7
    12,   // t0
    13,   // t1
    14,   // t2
    15,   // t3
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}
128
129
// Returns the Register value for architectural register number |num|
// (the inverse of ToNumber above).
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3, a4, a5, a6, a7,
    t0, t1, t2, t3,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}
148
149
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

// Relocation modes that must be processed when code moves: code targets and
// both flavors of internal references.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  // MIPS64 does not use constant pools.
  return false;
}
169
170
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

// Builds an immediate operand from a handle. Heap objects are embedded via
// their handle location with EMBEDDED_OBJECT relocation; Smis are embedded
// directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


// Base register + immediate offset addressing.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


// Base register + scaled offset (unit * multiplier + offset_addend).
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
202
203
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// Sign bit of a 16-bit immediate offset (distinguishes negative offsets).
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
// ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, 0)) — frame-slot load pattern.
const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, 0)) — frame-slot store pattern.
const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, negative offset)) pattern.
const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, negative offset)) pattern.
const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode + rs field; used to match the lw/sw frame patterns above.
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
240
241
// Constructs an assembler over |buffer| (owned or growable via the base
// class) and initializes trampoline-pool bookkeeping. Relocation info is
// written backwards from the end of the buffer.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
264
265
// Finalizes generated code into |desc|: instruction bytes grow from the start
// of the buffer, relocation info grows down from the end.
void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;
}


// Pads with nops until pc_offset() is a multiple of |m| (m must be a power of
// two, at least instruction-size aligned).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  EmitForbiddenSlotInstruction();
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
294
295
// Decodes the rt register operand of |instr|.
Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


// Decodes the rs register operand of |instr|.
Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


// Decodes the rd register operand of |instr|.
Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}
315
316
// The following accessors extract instruction fields. The *Field variants
// return the field still in place (masked, unshifted); the others return the
// shifted-down value.

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


// Everything except the 16-bit immediate; zero for an emitted label constant.
uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}
380
381
// Matches ld(r, MemOperand(sp, 0)) for any rt — the Pop(r) pattern.
bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


// Matches sd(r, MemOperand(sp, 0)) for any rt — the Push(r) pattern.
bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


// Matches sw(r, MemOperand(fp, offset)) with a non-negative offset.
bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


// Matches lw(r, MemOperand(fp, offset)) with a non-negative offset.
bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


// Matches sw(r, MemOperand(fp, offset)) with a negative offset (the offset's
// sign bit is included in the compared mask).
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


// Matches lw(r, MemOperand(fp, offset)) with a negative offset.
bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
412
413
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction 16-bit offset field addresses 32-bit words, but in
// code is conv to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
432
433
// Returns true if |instr| is any PC-relative branch (classic, REGIMM,
// coprocessor, or — on r6 — the compact branch encodings).
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
457
458
459bool Assembler::IsBc(Instr instr) {
460 uint32_t opcode = GetOpcodeField(instr);
461 // Checks if the instruction is a BC or BALC.
462 return opcode == BC || opcode == BALC;
463}
464
465
466bool Assembler::IsBzc(Instr instr) {
467 uint32_t opcode = GetOpcodeField(instr);
468 // Checks if the instruction is BEQZC or BNEZC.
469 return (opcode == POP66 && GetRsField(instr) != 0) ||
470 (opcode == POP76 && GetRsField(instr) != 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000471}
472
473
// True for an emitted label constant (opcode and register fields all zero).
bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


// r6 compact BEQZC: POP66 with non-zero rs.
bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


// r6 compact BNEZC: POP76 with non-zero rs.
bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


// r6 compact BEQC: POP10 with 0 < rs < rt (field comparison distinguishes it
// from the BOVC/BEQZALC variants that share the POP10 opcode).
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


// r6 compact BNEC: POP30 with 0 < rs < rt.
bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}
516
517
// True for any jump: J, JAL, or the register forms JR/JALR.
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (ORI).
  return opcode == ORI;
}
564
565
// True if |instr| is a nop of the given marker |type| (0 = plain nop).
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}


// Returns the branch byte offset encoded in a (16-bit offset) branch:
// sign-extended immediate scaled by instruction size.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
594
595
bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


// Returns the raw 16-bit immediate offset of an lw instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


// Rebuilds an lw instruction keeping rs/rt but with a new offset.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


// Replaces the 16-bit immediate offset of an sw instruction.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


// True for the immediate adds used in stack adjustment (32- or 64-bit).
bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


// Replaces the 16-bit immediate of an addiu/daddiu instruction.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
643
644
// Returns the width of |instr|'s branch-offset field: 26 bits for r6 BC/BALC,
// 21 bits for r6 BEQZC/BNEZC, otherwise the classic 16 bits.
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}


// Decodes a branch at |pos| and returns its target position, or kEndOfChain
// if the encoded offset is the chain-terminator sentinel.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
673
674
675int Assembler::target_at(int pos, bool is_internal) {
676 if (is_internal) {
677 int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
678 int64_t address = *p;
679 if (address == kEndOfJumpChain) {
680 return kEndOfChain;
681 } else {
682 int64_t instr_address = reinterpret_cast<int64_t>(p);
683 DCHECK(instr_address - address < INT_MAX);
684 int delta = static_cast<int>(instr_address - address);
685 DCHECK(pos > delta);
686 return pos - delta;
687 }
688 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000689 Instr instr = instr_at(pos);
690 if ((instr & ~kImm16Mask) == 0) {
691 // Emitted label constant, not part of a branch.
692 if (instr == 0) {
693 return kEndOfChain;
694 } else {
695 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
696 return (imm18 + pos);
697 }
698 }
699 // Check we have a branch or jump instruction.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000700 DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000701 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
702 // the compiler uses arithmetic shifts for signed integers.
703 if (IsBranch(instr)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000704 return AddBranchOffset(pos, instr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000705 } else if (IsLui(instr)) {
706 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
707 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
708 Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
709 DCHECK(IsOri(instr_ori));
710 DCHECK(IsOri(instr_ori2));
711
712 // TODO(plind) create named constants for shift values.
713 int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
714 imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
715 imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
716 // Sign extend address;
717 imm >>= 16;
718
719 if (imm == kEndOfJumpChain) {
720 // EndOfChain sentinel is returned directly, not relative to pc or pos.
721 return kEndOfChain;
722 } else {
723 uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000724 DCHECK(instr_address - imm < INT_MAX);
725 int delta = static_cast<int>(instr_address - imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000726 DCHECK(pos > delta);
727 return pos - delta;
728 }
729 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000730 DCHECK(IsJ(instr) || IsJal(instr));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000731 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
732 if (imm28 == kEndOfJumpChain) {
733 // EndOfChain sentinel is returned directly, not relative to pc or pos.
734 return kEndOfChain;
735 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000736 // Sign extend 28-bit offset.
737 int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
738 return pos + delta;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000739 }
740 }
741}
742
743
// Re-encodes |instr|'s offset field (whatever its width) so the branch at
// |pos| targets |target_pos|. The offset must be word-aligned and fit the
// instruction's offset field.
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
757
758
// Patches the link at |pos| to point to |target_pos|, using the appropriate
// encoding for an internal reference, label constant, branch, lui/ori
// sequence, or j/jal (mirrors the decoding in target_at).
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    // Store the absolute target address as a raw 64-bit word.
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    // Patch the three 16-bit immediate chunks of the lui/ori/ori sequence.
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
819
820
// Debug helper: prints a label's state; for a linked label, walks and prints
// the whole link chain (on a local copy so |L| is not consumed).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
                   internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
844
845
// Binds label |L| to position |pos|: walks the link chain and patches every
// fixup to target |pos|, routing out-of-range branches through a trampoline
// slot when the distance exceeds the branch's reach.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Branch can't reach the target directly: bounce via a trampoline
          // slot that is within range, then point the trampoline at |pos|.
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
893
894
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


// Advances |L| to the next entry in its link chain (or Unuse()s it at the
// end of the chain).
void Assembler::next(Label* L, bool is_internal) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos(), is_internal);
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


// True when a classic 16-bit-offset branch can reach bound label |L| (with a
// few instructions of headroom).
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}


// True when a branch with an offset field of |bits| can reach |L|; unbound
// (or null) labels are assumed reachable.
bool Assembler::is_near(Label* L, OffsetSize bits) {
  if (L == nullptr || !L->is_bound()) return true;
  return ((pc_offset() - L->pos()) <
          (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}


// Reach check dispatching on architecture variant (r6 compact vs pre-r6).
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
930
931
932int Assembler::BranchOffset(Instr instr) {
933 // At pre-R6 and for other R6 branches the offset is 16 bits.
934 int bits = OffsetSize::kOffset16;
935
936 if (kArchVariant == kMips64r6) {
937 uint32_t opcode = GetOpcodeField(instr);
938 switch (opcode) {
939 // Checks BC or BALC.
940 case BC:
941 case BALC:
942 bits = OffsetSize::kOffset26;
943 break;
944
945 // Checks BEQZC or BNEZC.
946 case POP66:
947 case POP76:
948 if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
949 break;
950 default:
951 break;
952 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000953 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000954
955 return (1 << (bits + 2 - 1)) - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000956}
957
958
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Any relocation mode other than NONE may be patched after assembly.
  return !RelocInfo::IsNone(rmode);
}
966
// Emits an R-type instruction: opcode | rs | rt | rd | sa | func.
// Used for three-register ALU operations and immediate shifts.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


// R-type variant where the rd and sa fields carry 5-bit immediates
// (msb/lsb) instead of register codes.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


// FPU R-type: opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


// FPU R-type with four FPU register operands (extra fr field).
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// FPU R-type with a general register in the rt field (GPR <-> FPU forms).
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// FPU control-register access form: general register rt, FPU control
// register fs.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1042
1043
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
// The is_compact_branch flag is forwarded to emit() so the assembler can
// track R6 compact branches.

// I-type: two registers and a 16-bit immediate.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// I-type where the rt field carries a secondary opcode (REGIMM-style).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// I-type with a general register base and an FPU register in the ft field.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
                                  int32_t j,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr, is_compact_branch);
}


// One register plus a signed 21-bit offset (R6 compact branch form).
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}


// One register plus an unsigned 21-bit offset.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}


// Opcode plus a signed 26-bit offset (used by bc/balc).
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
                                  CompactBranchType is_compact_branch) {
  DCHECK(is_int26(offset26));
  Instr instr = opcode | (offset26 & kImm26Mask);
  emit(instr, is_compact_branch);
}
1097
1098
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001099void Assembler::GenInstrJump(Opcode opcode,
1100 uint32_t address) {
1101 BlockTrampolinePoolScope block_trampoline_pool(this);
1102 DCHECK(is_uint26(address));
1103 Instr instr = opcode | address;
1104 emit(instr);
1105 BlockTrampolinePoolFor(1); // For associated delay slot.
1106}
1107
1108
1109// Returns the next free trampoline entry.
1110int32_t Assembler::get_trampoline_entry(int32_t pos) {
1111 int32_t trampoline_entry = kInvalidSlotPos;
1112 if (!internal_trampoline_exception_) {
1113 if (trampoline_.start() > pos) {
1114 trampoline_entry = trampoline_.take_slot();
1115 }
1116
1117 if (kInvalidSlotPos == trampoline_entry) {
1118 internal_trampoline_exception_ = true;
1119 }
1120 }
1121 return trampoline_entry;
1122}
1123
1124
// Returns the absolute address of label L's target: buffer start plus the
// bound position. For an unbound label the current pc is linked into L's
// chain and kEndOfJumpChain is returned instead.
uint64_t Assembler::jump_address(Label* L) {
  int64_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }
  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);  // Targets are instruction (4-byte) aligned.

  return imm;
}
1143
1144
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001145uint64_t Assembler::jump_offset(Label* L) {
1146 int64_t target_pos;
1147 int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1148
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001149 if (L->is_bound()) {
1150 target_pos = L->pos();
1151 } else {
1152 if (L->is_linked()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001153 target_pos = L->pos(); // L's link.
1154 L->link_to(pc_offset() + pad);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001155 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001156 L->link_to(pc_offset() + pad);
1157 return kEndOfJumpChain;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001158 }
1159 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001160 int64_t imm = target_pos - (pc_offset() + pad);
1161 DCHECK((imm & 3) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001162
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001163 return static_cast<uint64_t>(imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001164}
1165
1166
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001167int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001168 int32_t target_pos;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001169 int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1170
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001171 if (L->is_bound()) {
1172 target_pos = L->pos();
1173 } else {
1174 if (L->is_linked()) {
1175 target_pos = L->pos();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001176 L->link_to(pc_offset() + pad);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001177 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001178 L->link_to(pc_offset() + pad);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001179 if (!trampoline_emitted_) {
1180 unbound_labels_count_++;
1181 next_buffer_check_ -= kTrampolineSlotsSize;
1182 }
1183 return kEndOfChain;
1184 }
1185 }
1186
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001187 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1188 DCHECK(is_intn(offset, bits + 2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001189 DCHECK((offset & 3) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001190
1191 return offset;
1192}
1193
1194
// Writes label L's position into the instruction stream at at_offset. A
// bound label is stored as a code-object-relative value; otherwise the chain
// link is encoded in the imm16 field and L is linked to at_offset.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);  // Positions are instruction aligned.
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        // First use of an unbound label: same bookkeeping as
        // branch_offset_helper().
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1219
1220
1221//------- Branch and jump instructions --------
1222
// Unconditional branch, encoded as beq zero_reg, zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


// Branch-and-link, encoded as bgezal with rs = zero_reg (always taken).
void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}
1232
1233
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001234void Assembler::bc(int32_t offset) {
1235 DCHECK(kArchVariant == kMips64r6);
1236 GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1237}
1238
1239
1240void Assembler::balc(int32_t offset) {
1241 DCHECK(kArchVariant == kMips64r6);
1242 positions_recorder()->WriteRecordedPositions();
1243 GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
1244}
1245
1246
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001247void Assembler::beq(Register rs, Register rt, int16_t offset) {
1248 BlockTrampolinePoolScope block_trampoline_pool(this);
1249 GenInstrImmediate(BEQ, rs, rt, offset);
1250 BlockTrampolinePoolFor(1); // For associated delay slot.
1251}
1252
1253
1254void Assembler::bgez(Register rs, int16_t offset) {
1255 BlockTrampolinePoolScope block_trampoline_pool(this);
1256 GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1257 BlockTrampolinePoolFor(1); // For associated delay slot.
1258}
1259
1260
// Compact branch if rt >= 0 (R6 only). Reuses the BLEZL opcode with both
// register fields set to rt.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch if rs >= rt, unsigned (R6 only). Requires two distinct
// non-zero registers.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch if rs >= rt, signed (R6 only).
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Branch-and-link if rs >= 0. On R6 only the rs == zero_reg form (bal)
// remains valid.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1293
1294
// Branch if rs > 0 (signed).
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch if rt > 0 (R6 only), BGTZL opcode with rs = zero_reg.
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Branch if rs <= 0 (signed).
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch if rt <= 0 (R6 only), BLEZL opcode with rs = zero_reg.
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch if rt < 0 (R6 only), BGTZL opcode with both fields = rt.
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1330
1331
// Compact branch if rs < rt, unsigned (R6 only). Requires two distinct
// non-zero registers.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch if rs < rt, signed (R6 only).
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Branch if rs < 0 (signed).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Branch-and-link if rs < 0. On R6 only the rs == zero_reg form is valid.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Branch if rs != rt.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1371
1372
// Compact branch on overflow of the 32-bit add rs + rt (R6 only, ADDI
// opcode). The operands are ordered so the higher-numbered register lands in
// the rs field, as the encoding requires.
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// Compact branch on no-overflow of rs + rt (R6 only, DADDI opcode); same
// operand ordering rule as bovc.
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  if (rs.code() >= rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}
1391
1392
// Compact branch-and-link family (R6 only, no delay slot). Each records
// source positions before emitting, like other call-style instructions.

// Compact branch-and-link if rt <= 0.
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link if rt >= 0 (BLEZ opcode, both fields = rt).
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Pre-R6 only: branch-and-link (likely form) if rs >= 0, with delay slot.
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Compact branch-and-link if rt < 0 (BGTZ opcode, both fields = rt).
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link if rt > 0.
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link if rt == 0 (ADDI opcode, rs = zero_reg).
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}


// Compact branch-and-link if rt != 0 (DADDI opcode, rs = zero_reg).
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1453
1454
// Compact branch if rs == rt (R6 only, ADDI opcode). The encoding requires
// two distinct non-zero registers, with the smaller register code in the rs
// field.
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// Compact branch if rs == 0 (R6 only), 21-bit offset (POP66 encoding).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}


// Compact branch if rs != rt (R6 only, DADDI opcode); same register-order
// rule as beqc.
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
  if (rs.code() < rt.code()) {
    GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
  } else {
    GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
  }
}


// Compact branch if rs != 0 (R6 only), 21-bit offset (POP76 encoding).
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1489
1490
// Absolute jump: emits a J instruction with target >> 2 in the 26-bit field.
void Assembler::j(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Jump to a label. A bound label is emitted with the kJRawMark opcode marker
// (carrying the pc-relative offset from jump_offset()); an unbound label
// falls back to j(imm) with the chain value.
void Assembler::j(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    j(imm);
  }
}


// Jump-and-link to a label; same scheme as j(Label*) but with kJalRawMark
// and recorded source positions.
void Assembler::jal(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    positions_recorder()->WriteRecordedPositions();
    GenInstrJump(static_cast<Opcode>(kJalRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jal(imm);
  }
}
1523
1524
// Jump to the address in rs. On R6 the JR encoding is gone, so this is
// expressed as jalr rs, zero_reg (link discarded).
void Assembler::jr(Register rs) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (rs.is(ra)) {
      // jr ra is a return; record source positions for it.
      positions_recorder()->WriteRecordedPositions();
    }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}
1537
1538
// Absolute jump-and-link: JAL with target >> 2 in the 26-bit field.
void Assembler::jal(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Jump-and-link through register rs, storing the return address in rd.
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());  // rd would be clobbered before the jump.
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1554
1555
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001556void Assembler::jic(Register rt, int16_t offset) {
1557 DCHECK(kArchVariant == kMips64r6);
1558 GenInstrImmediate(POP66, zero_reg, rt, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001559}
1560
1561
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001562void Assembler::jialc(Register rt, int16_t offset) {
1563 DCHECK(kArchVariant == kMips64r6);
1564 positions_recorder()->WriteRecordedPositions();
1565 GenInstrImmediate(POP76, zero_reg, rt, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001566}
1567
1568
1569// -------Data-processing-instructions---------
1570
1571// Arithmetic.
1572
// rd = rs + rt (32-bit, non-trapping).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


// rd = rs + imm (32-bit). Note: the destination rd travels in the encoding's
// rt field.
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


// rd = rs - rt (32-bit).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


// rd = low 32 bits of rs * rt. R6 has a dedicated SPECIAL encoding; pre-R6
// uses the SPECIAL2 MUL form.
void Assembler::mul(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
  } else {
    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
  }
}
1595
1596
// Multiply family. The R6 three-operand forms select the low (MUL_OP) or
// high (MUH_OP) half of the product and write it straight to rd; the pre-R6
// mult/multu forms deposit the full product in the hi/lo registers.

// rd = high 32 bits of rs * rt, signed (R6 only).
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}


// rd = low 32 bits of rs * rt, unsigned (R6 only).
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}


// rd = high 32 bits of rs * rt, unsigned (R6 only).
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}


// 64-bit variants of the above (R6 only).
void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}


void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}


void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}


void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}


// Pre-R6 signed multiply; result in hi/lo.
void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


// Pre-R6 unsigned multiply; result in hi/lo.
void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
1649
1650
// Divide / 64-bit arithmetic family. The two-operand div/mod forms are the
// classic hi/lo-writing encodings; the three-operand forms (DIV_OP/MOD_OP
// in the sa field) are R6-only and write the quotient or remainder to rd.

// rd = rs + imm (64-bit); rd travels in the encoding's rt field.
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}


// 32-bit signed divide, result in hi/lo.
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


// rd = rs / rt, 32-bit signed (R6 only).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}


// rd = rs % rt, 32-bit signed (R6 only).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}


// 32-bit unsigned divide, result in hi/lo.
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// rd = rs / rt, 32-bit unsigned (R6 only).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}


// rd = rs % rt, 32-bit unsigned (R6 only).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}


// rd = rs + rt (64-bit).
void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}


// rd = rs - rt (64-bit).
void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}


// 64-bit signed multiply, result in hi/lo.
void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}


// 64-bit unsigned multiply, result in hi/lo.
void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}


// 64-bit signed divide, result in hi/lo.
void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}


// rd = rs / rt, 64-bit signed (R6 only).
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}


// rd = rs % rt, 64-bit signed (R6 only).
void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}


// 64-bit unsigned divide, result in hi/lo.
void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}


// rd = rs / rt, 64-bit unsigned (R6 only).
void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}


// rd = rs % rt, 64-bit unsigned (R6 only).
void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
1742
1743
1744// Logical.
1745
// Bitwise logic. The immediate forms take an unsigned 16-bit operand, and
// the destination travels in the encoding's rt field.

// rd = rs & rt.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


// rt = rs & imm (zero-extended).
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


// rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


// rt = rs | imm (zero-extended).
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


// rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


// rt = rs ^ imm (zero-extended).
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


// rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1782
1783
1784// Shifts.
// Shifts.
// Shift-left logical by immediate; sa is masked to 5 bits.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
1796
1797
// Shift-left logical, shift amount taken from rs.
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


// Shift-right logical by immediate (sa masked to 5 bits).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}


// Shift-right logical, shift amount taken from rs.
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


// Shift-right arithmetic by immediate (sa masked to 5 bits).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}


// Shift-right arithmetic, shift amount taken from rs.
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
1821
1822
1823void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1824 // Should be called via MacroAssembler::Ror.
1825 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001826 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001827 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1828 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1829 emit(instr);
1830}
1831
1832
1833void Assembler::rotrv(Register rd, Register rt, Register rs) {
1834 // Should be called via MacroAssembler::Ror.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001835 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
1836 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001837 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1838 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1839 emit(instr);
1840}
1841
1842
// 64-bit shift left logical by a constant amount (0-31).
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}


// 64-bit shift left logical by a register-held amount.
void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}


// 64-bit shift right logical by a constant amount (0-31).
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}


// 64-bit shift right logical by a register-held amount.
void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}
1861
1862
1863void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
1864 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1865 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1866 | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
1867 emit(instr);
1868}
1869
Ben Murdochda12d292016-06-02 14:46:10 +01001870void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
1871 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1872 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
1873 (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
1874 emit(instr);
1875}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001876
1877void Assembler::drotrv(Register rd, Register rt, Register rs) {
1878 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1879 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1880 | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
1881 emit(instr);
1882}
1883
1884
// 64-bit arithmetic shift right by a constant amount (0-31).
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}


// 64-bit arithmetic shift right by a register-held amount.
void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}


// 64-bit shift left logical by 32 + sa (sa in 0-31).
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}


// 64-bit shift right logical by 32 + sa (sa in 0-31).
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}


// 64-bit arithmetic shift right by 32 + sa (sa in 0-31).
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}
1908
1909
1910void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
1911 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
Ben Murdochda12d292016-06-02 14:46:10 +01001912 DCHECK(sa <= 3);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001913 DCHECK(kArchVariant == kMips64r6);
Ben Murdochda12d292016-06-02 14:46:10 +01001914 Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
1915 rd.code() << kRdShift | sa << kSaShift | LSA;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001916 emit(instr);
1917}
1918
1919
1920void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
1921 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
Ben Murdochda12d292016-06-02 14:46:10 +01001922 DCHECK(sa <= 3);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001923 DCHECK(kArchVariant == kMips64r6);
Ben Murdochda12d292016-06-02 14:46:10 +01001924 Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
1925 rd.code() << kRdShift | sa << kSaShift | DLSA;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001926 emit(instr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001927}
1928
1929
// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
// Materializes the full (sign-extended) 32-bit offset in 'at' and adds the
// base register, so the access can be done as <op>(reg, MemOperand(at, 0)).
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));  // 'at' is about to be clobbered.
  DCHECK(is_int32(src.offset_));
  // daddiu sign-extends the upper half; dsll/ori splice in the low half.
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}
1941
1942
// Load byte, sign-extended.
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


// Load byte, zero-extended.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


// Load halfword, sign-extended.
void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


// Load halfword, zero-extended.
void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


// Load word, sign-extended.
void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


// Load word, zero-extended.
void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}
2001
2002
// Load word left — one half of an unaligned word load pair.
void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


// Load word right — the other half of an unaligned word load pair.
void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
2011
2012
// Store byte.
void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


// Store halfword.
void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


// Store word.
void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


// Store word left — one half of an unaligned word store pair.
void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


// Store word right — the other half of an unaligned word store pair.
void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
2051
2052
// Load upper immediate: rd = imm16 << 16.
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


// Add upper immediate: rt = rs + (imm16 << 16).
void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}


// 64-bit add upper immediate; the ISA reserves rs == zero_reg for daui.
void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(!rs.is(zero_reg));
  GenInstrImmediate(DAUI, rs, rt, j);
}


// Add immediate to bits 47..32 of rs (in place, REGIMM encoding).
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}


// Add immediate to bits 63..48 of rs (in place, REGIMM encoding).
void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}
2084
2085
// Load doubleword left — one half of an unaligned doubleword load pair.
void Assembler::ldl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}


// Load doubleword right — the other half of an unaligned doubleword load.
void Assembler::ldr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}


// Store doubleword left — one half of an unaligned doubleword store pair.
void Assembler::sdl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}


// Store doubleword right — the other half of an unaligned doubleword store.
void Assembler::sdr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
2104
2105
// Load doubleword.
void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to ld(rd, MemOperand(at, 0));
  }
}


// Store doubleword.
void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sd(rd, MemOperand(at, 0));
  }
}
2124
2125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002126// ---------PC-Relative instructions-----------
2127
2128void Assembler::addiupc(Register rs, int32_t imm19) {
2129 DCHECK(kArchVariant == kMips64r6);
2130 DCHECK(rs.is_valid() && is_int19(imm19));
2131 uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
2132 GenInstrImmediate(PCREL, rs, imm21);
2133}
2134
2135
2136void Assembler::lwpc(Register rs, int32_t offset19) {
2137 DCHECK(kArchVariant == kMips64r6);
2138 DCHECK(rs.is_valid() && is_int19(offset19));
2139 uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
2140 GenInstrImmediate(PCREL, rs, imm21);
2141}
2142
2143
2144void Assembler::lwupc(Register rs, int32_t offset19) {
2145 DCHECK(kArchVariant == kMips64r6);
2146 DCHECK(rs.is_valid() && is_int19(offset19));
2147 uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
2148 GenInstrImmediate(PCREL, rs, imm21);
2149}
2150
2151
2152void Assembler::ldpc(Register rs, int32_t offset18) {
2153 DCHECK(kArchVariant == kMips64r6);
2154 DCHECK(rs.is_valid() && is_int18(offset18));
2155 uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
2156 GenInstrImmediate(PCREL, rs, imm21);
2157}
2158
2159
2160void Assembler::auipc(Register rs, int16_t imm16) {
2161 DCHECK(kArchVariant == kMips64r6);
2162 DCHECK(rs.is_valid());
2163 uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
2164 GenInstrImmediate(PCREL, rs, imm21);
2165}
2166
2167
2168void Assembler::aluipc(Register rs, int16_t imm16) {
2169 DCHECK(kArchVariant == kMips64r6);
2170 DCHECK(rs.is_valid());
2171 uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
2172 GenInstrImmediate(PCREL, rs, imm21);
2173}
2174
2175
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002176// -------------Misc-instructions--------------
2177
2178// Break / Trap instructions.
2179void Assembler::break_(uint32_t code, bool break_as_stop) {
2180 DCHECK((code & ~0xfffff) == 0);
2181 // We need to invalidate breaks that could be stops as well because the
2182 // simulator expects a char pointer after the stop instruction.
2183 // See constants-mips.h for explanation.
2184 DCHECK((break_as_stop &&
2185 code <= kMaxStopCode &&
2186 code > kMaxWatchpointCode) ||
2187 (!break_as_stop &&
2188 (code > kMaxStopCode ||
2189 code <= kMaxWatchpointCode)));
2190 Instr break_instr = SPECIAL | BREAK | (code << 6);
2191 emit(break_instr);
2192}
2193
2194
// Emits a stop: a plain break on real hardware, or — in simulator builds — a
// break in the stop-code range followed by the message pointer.
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  // Keep the break and the embedded pointer contiguous (3 instruction slots).
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}
2208
2209
2210void Assembler::tge(Register rs, Register rt, uint16_t code) {
2211 DCHECK(is_uint10(code));
2212 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
2213 | rt.code() << kRtShift | code << 6;
2214 emit(instr);
2215}
2216
2217
2218void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
2219 DCHECK(is_uint10(code));
2220 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
2221 | rt.code() << kRtShift | code << 6;
2222 emit(instr);
2223}
2224
2225
2226void Assembler::tlt(Register rs, Register rt, uint16_t code) {
2227 DCHECK(is_uint10(code));
2228 Instr instr =
2229 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2230 emit(instr);
2231}
2232
2233
2234void Assembler::tltu(Register rs, Register rt, uint16_t code) {
2235 DCHECK(is_uint10(code));
2236 Instr instr =
2237 SPECIAL | TLTU | rs.code() << kRsShift
2238 | rt.code() << kRtShift | code << 6;
2239 emit(instr);
2240}
2241
2242
2243void Assembler::teq(Register rs, Register rt, uint16_t code) {
2244 DCHECK(is_uint10(code));
2245 Instr instr =
2246 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2247 emit(instr);
2248}
2249
2250
2251void Assembler::tne(Register rs, Register rt, uint16_t code) {
2252 DCHECK(is_uint10(code));
2253 Instr instr =
2254 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
2255 emit(instr);
2256}
2257
2258
// Move from HI/LO register.

// rd = HI.
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


// rd = LO.
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
2269
2270
// Set on less than instructions.
// rd = (rs < rt) ? 1 : 0, signed compare.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


// rd = (rs < rt) ? 1 : 0, unsigned compare.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// rt = (rs < sign-extended imm16) ? 1 : 0, signed compare.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


// rt = (rs < sign-extended imm16) ? 1 : 0, unsigned compare.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
2290
2291
// Conditional move.
// rd = rs if rt == 0.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


// rd = rs if rt != 0.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}
2301
2302
2303void Assembler::movt(Register rd, Register rs, uint16_t cc) {
2304 Register rt;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002305 rt.reg_code = (cc & 0x0007) << 2 | 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002306 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2307}
2308
2309
2310void Assembler::movf(Register rd, Register rs, uint16_t cc) {
2311 Register rt;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002312 rt.reg_code = (cc & 0x0007) << 2 | 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002313 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
2314}
2315
2316
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002317void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2318 min(S, fd, fs, ft);
2319}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002320
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002321
2322void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2323 min(D, fd, fs, ft);
2324}
2325
2326
2327void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2328 max(S, fd, fs, ft);
2329}
2330
2331
2332void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2333 max(D, fd, fs, ft);
2334}
2335
2336
2337void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2338 mina(S, fd, fs, ft);
2339}
2340
2341
2342void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2343 mina(D, fd, fs, ft);
2344}
2345
2346
2347void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2348 maxa(S, fd, fs, ft);
2349}
2350
2351
2352void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2353 maxa(D, fd, fs, ft);
2354}
2355
2356
2357void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
2358 FPURegister ft) {
2359 DCHECK(kArchVariant == kMips64r6);
2360 DCHECK((fmt == D) || (fmt == S));
2361 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2362}
2363
2364
2365void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
2366 FPURegister ft) {
2367 DCHECK(kArchVariant == kMips64r6);
2368 DCHECK((fmt == D) || (fmt == S));
2369 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002370}
2371
2372
// GPR. rd = rs if rt == 0, else 0 (mips64r6 SELEQZ).
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}


// GPR. rd = rs if rt != 0, else 0 (mips64r6 SELNEZ).
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
2385
2386
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002387// Bit twiddling.
2388void Assembler::clz(Register rd, Register rs) {
2389 if (kArchVariant != kMips64r6) {
2390 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
2391 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
2392 } else {
2393 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
2394 }
2395}
2396
2397
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002398void Assembler::dclz(Register rd, Register rs) {
2399 if (kArchVariant != kMips64r6) {
2400 // dclz instr requires same GPR number in 'rd' and 'rt' fields.
2401 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
2402 } else {
2403 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
2404 }
2405}
2406
2407
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002408void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2409 // Should be called via MacroAssembler::Ins.
2410 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
2411 DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
2412 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
2413}
2414
2415
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002416void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2417 // Should be called via MacroAssembler::Dins.
2418 // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
2419 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2420 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
2421}
2422
2423
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002424void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
2425 // Should be called via MacroAssembler::Ext.
2426 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
2427 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2428 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
2429}
2430
2431
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002432void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002433 // Should be called via MacroAssembler::Dext.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002434 // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
2435 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2436 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
2437}
2438
2439
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002440void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
2441 // Should be called via MacroAssembler::Dextm.
2442 // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
2443 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2444 GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
2445}
2446
2447
2448void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
2449 // Should be called via MacroAssembler::Dextu.
2450 // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
2451 DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
2452 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
2453}
2454
2455
// Reverses the bit order within each byte of the word in rt (mips64r6).
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}


// 64-bit variant of bitswap (mips64r6).
void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}
2466
2467
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002468void Assembler::pref(int32_t hint, const MemOperand& rs) {
2469 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
2470 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
2471 | (rs.offset_);
2472 emit(instr);
2473}
2474
2475
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002476void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
2477 DCHECK(kArchVariant == kMips64r6);
2478 DCHECK(is_uint3(bp));
2479 uint16_t sa = (ALIGN << kBp2Bits) | bp;
2480 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
2481}
2482
2483
2484void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
2485 DCHECK(kArchVariant == kMips64r6);
2486 DCHECK(is_uint3(bp));
2487 uint16_t sa = (DALIGN << kBp3Bits) | bp;
2488 GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
2489}
2490
2491
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002492// --------Coprocessor-instructions----------------
2493
2494// Load, store, move.
2495void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002496 if (is_int16(src.offset_)) {
2497 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2498 } else { // Offset > 16 bits, use multiple instructions to load.
2499 LoadRegPlusOffsetToAt(src);
2500 GenInstrImmediate(LWC1, at, fd, 0);
2501 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002502}
2503
2504
2505void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002506 if (is_int16(src.offset_)) {
2507 GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
2508 } else { // Offset > 16 bits, use multiple instructions to load.
2509 LoadRegPlusOffsetToAt(src);
2510 GenInstrImmediate(LDC1, at, fd, 0);
2511 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002512}
2513
2514
2515void Assembler::swc1(FPURegister fd, const MemOperand& src) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002516 if (is_int16(src.offset_)) {
2517 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2518 } else { // Offset > 16 bits, use multiple instructions to load.
2519 LoadRegPlusOffsetToAt(src);
2520 GenInstrImmediate(SWC1, at, fd, 0);
2521 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002522}
2523
2524
2525void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002526 DCHECK(!src.rm().is(at));
2527 if (is_int16(src.offset_)) {
2528 GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
2529 } else { // Offset > 16 bits, use multiple instructions to load.
2530 LoadRegPlusOffsetToAt(src);
2531 GenInstrImmediate(SDC1, at, fd, 0);
2532 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002533}
2534
2535
// Move word from GPR rt into the low 32 bits of FPR fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// Move word from GPR rt into the high 32 bits of FPR fs.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


// Move the full 64-bit doubleword from GPR rt into FPR fs.
void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}


// Move the low 32 bits of FPR fs into GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// Move the high 32 bits of FPR fs into GPR rt.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


// Move the full 64-bit doubleword from FPR fs into GPR rt.
void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}


// Move GPR rt into FPU control register fs.
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


// Move FPU control register fs into GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
2574
2575
2576void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2577 uint64_t i;
2578 memcpy(&i, &d, 8);
2579
2580 *lo = i & 0xffffffff;
2581 *hi = i >> 32;
2582}
2583
2584
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002585void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2586 FPURegister ft) {
2587 DCHECK(kArchVariant == kMips64r6);
2588 DCHECK((fmt == D) || (fmt == S));
2589
2590 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2591}
2592
2593
2594void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2595 sel(S, fd, fs, ft);
2596}
2597
2598
2599void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2600 sel(D, fd, fs, ft);
2601}
2602
2603
// FPR.
// SELEQZ.fmt: fd = fs if bit 0 of ft is clear, else zero.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}


// Double-precision seleqz.
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(D, fd, fs, ft);
}


// Single-precision seleqz.
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  seleqz(S, fd, fs, ft);
}


// Double-precision selnez.
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(D, fd, fs, ft);
}


// Single-precision selnez.
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  selnez(S, fd, fs, ft);
}
2630
2631
// fd = fs if GPR rt == 0 (mips64r2 conditional FP move).
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}


// Double-precision movz.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}


// fd = fs if FP condition flag 'cc' is true (single precision).
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  // The ft slot packs the condition code (bits 4..2) and tf = 1 (bit 0).
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// fd = fs if FP condition flag 'cc' is true (double precision).
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// fd = fs if FP condition flag 'cc' is false (single precision).
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  // The ft slot packs the condition code (bits 4..2) and tf = 0 (bit 0).
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// fd = fs if FP condition flag 'cc' is false (double precision).
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// fd = fs if GPR rt != 0 (single precision).
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}


// fd = fs if GPR rt != 0 (double precision).
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
2686
2687
// FPR.
// SELNEZ.fmt (mips64r6): fd = fs if bit 0 of ft is set, else zero.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
2695
2696
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002697// Arithmetic.
2698
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002699void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2700 GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
2701}
2702
2703
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002704void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2705 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2706}
2707
2708
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002709void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2710 GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
2711}
2712
2713
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002714void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2715 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2716}
2717
2718
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002719void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2720 GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
2721}
2722
2723
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002724void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2725 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2726}
2727
2728
2729void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2730 FPURegister ft) {
2731 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2732}
2733
2734
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002735void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2736 GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
2737}
2738
2739
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002740void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2741 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2742}
2743
2744
// Emits abs.s: fd = |fs| (single precision); ft field is unused (f0).
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}
2748
2749
// Emits abs.d: fd = |fs| (double precision); ft field is unused (f0).
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
2753
2754
// Emits mov.d: fd = fs (double-precision FP register move).
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}
2758
2759
// Emits mov.s: fd = fs (single-precision FP register move).
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
2763
2764
// Emits neg.s: fd = -fs (single precision); NEG_D funct is format-shared.
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
2768
2769
// Emits neg.d: fd = -fs (double precision).
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
2773
2774
// Emits sqrt.s: fd = sqrt(fs) (single precision); SQRT_D funct is format-shared.
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}
2778
2779
// Emits sqrt.d: fd = sqrt(fs) (double precision).
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
2783
2784
// Emits rsqrt.s: fd = 1 / sqrt(fs), single-precision reciprocal square root.
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002788
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002789
// Emits rsqrt.d: fd = 1 / sqrt(fs), double-precision reciprocal square root.
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}
2793
2794
// Emits recip.d: fd = 1 / fs, double-precision reciprocal approximation.
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}
2798
2799
// Emits recip.s: fd = 1 / fs, single-precision reciprocal approximation.
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
2803
2804
2805// Conversions.
// Emits cvt.w.s: fd = (int32)fs, single -> 32-bit word, current rounding mode.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
2809
2810
// Emits cvt.w.d: fd = (int32)fs, double -> 32-bit word, current rounding mode.
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}
2814
2815
// Emits trunc.w.s: single -> 32-bit word, rounding toward zero.
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}
2819
2820
// Emits trunc.w.d: double -> 32-bit word, rounding toward zero.
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}
2824
2825
// Emits round.w.s: single -> 32-bit word, rounding to nearest.
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}
2829
2830
// Emits round.w.d: double -> 32-bit word, rounding to nearest.
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}
2834
2835
// Emits floor.w.s: single -> 32-bit word, rounding toward -infinity.
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}
2839
2840
// Emits floor.w.d: double -> 32-bit word, rounding toward -infinity.
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}
2844
2845
// Emits ceil.w.s: single -> 32-bit word, rounding toward +infinity.
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}
2849
2850
// Emits ceil.w.d: double -> 32-bit word, rounding toward +infinity.
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
2854
2855
// Emits rint.s via the shared helper; r6-only (checked in rint()).
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
2857
2858
// Emits rint.d via the shared helper; r6-only (checked in rint()).
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
2860
2861
// Emits rint.fmt: fd = round-to-integral of fs in FP format. r6-only.
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
2866
2867
// Emits cvt.l.s: single -> 64-bit long, current rounding mode (r2/r6 only).
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
2872
2873
// Emits cvt.l.d: double -> 64-bit long, current rounding mode (r2/r6 only).
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
2878
2879
// Emits trunc.l.s: single -> 64-bit long, rounding toward zero (r2/r6 only).
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
2884
2885
// Emits trunc.l.d: double -> 64-bit long, rounding toward zero (r2/r6 only).
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
2890
2891
// Emits round.l.s: single -> 64-bit long, rounding to nearest.
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
2895
2896
// Emits round.l.d: double -> 64-bit long, rounding to nearest.
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
2900
2901
// Emits floor.l.s: single -> 64-bit long, rounding toward -infinity.
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
2905
2906
// Emits floor.l.d: double -> 64-bit long, rounding toward -infinity.
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
2910
2911
// Emits ceil.l.s: single -> 64-bit long, rounding toward +infinity.
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
2915
2916
// Emits ceil.l.d: double -> 64-bit long, rounding toward +infinity.
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2920
2921
// Emits class.s: fd = classification mask of single-precision fs. r6-only.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
2926
2927
// Emits class.d: fd = classification mask of double-precision fs. r6-only.
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
2932
2933
// Emits mina.fmt: fd = operand with the smaller absolute value. r6-only;
// fmt must be S or D.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
2940
2941
// Emits maxa.fmt: fd = operand with the larger absolute value. r6-only;
// fmt must be S or D.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
                     FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
2948
2949
// Emits cvt.s.w: fd = (float) 32-bit word in fs.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
2953
2954
// Emits cvt.s.l: fd = (float) 64-bit long in fs (r2/r6 only).
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
2959
2960
// Emits cvt.s.d: fd = (float) double in fs.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
2964
2965
// Emits cvt.d.w: fd = (double) 32-bit word in fs.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
2969
2970
// Emits cvt.d.l: fd = (double) 64-bit long in fs (r2/r6 only).
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
2975
2976
// Emits cvt.d.s: fd = (double) single in fs.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
2980
2981
2982// Conditions for >= MIPSr6.
2983void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
2984 FPURegister fd, FPURegister fs, FPURegister ft) {
2985 DCHECK(kArchVariant == kMips64r6);
2986 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2987 Instr instr = COP1 | fmt | ft.code() << kFtShift |
2988 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
2989 emit(instr);
2990}
2991
2992
// Emits cmp.cond.s; the r6 CMP encoding uses the W format field for S.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}
2997
// Emits cmp.cond.d; the r6 CMP encoding uses the L format field for D.
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
3002
3003
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003004void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
3005 DCHECK(kArchVariant == kMips64r6);
3006 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
3007 emit(instr);
3008}
3009
3010
3011void Assembler::bc1nez(int16_t offset, FPURegister ft) {
3012 DCHECK(kArchVariant == kMips64r6);
3013 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
3014 emit(instr);
3015}
3016
3017
3018// Conditions for < MIPSr6.
// Emits the pre-r6 C.cond.fmt compare, setting FP condition-code bit `cc`
// (0..7) from the comparison of fs and ft. fmt must be S or D, already
// positioned in the rs field.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  // cc occupies bits 10..8; 3 << 4 is the fixed FC field of C.cond.fmt.
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}
3029
3030
// Emits c.cond.s (pre-r6 single-precision compare into condition bit cc).
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}
3035
3036
// Emits c.cond.d (pre-r6 double-precision compare into condition bit cc).
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
3041
3042
// Compares src1 against the constant 0.0 (the only supported src2), setting
// FP condition bit 0. NOTE: clobbers f14 as a scratch register.
void Assembler::fcmp(FPURegister src1, const double src2,
    FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);   // f14 = 0 (integer bits).
  cvt_d_w(f14, f14);     // f14 = 0.0 as a double.
  c(cond, D, src1, f14, 0);
}
3050
3051
3052void Assembler::bc1f(int16_t offset, uint16_t cc) {
3053 DCHECK(is_uint3(cc));
3054 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
3055 emit(instr);
3056}
3057
3058
3059void Assembler::bc1t(int16_t offset, uint16_t cc) {
3060 DCHECK(is_uint3(cc));
3061 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
3062 emit(instr);
3063}
3064
3065
// Adjusts one internal reference at `pc` by `pc_delta` after the code buffer
// moved. Handles three encodings: a raw 64-bit address (INTERNAL_REFERENCE),
// a lui/ori/dsll/ori address-load sequence, and a j/jal (possibly still in
// "raw mark" form). Returns the number of instructions patched (0 if the
// slot is an end-of-jump-chain sentinel).
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    // Plain 64-bit address stored directly in the instruction stream.
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    // Address is spread over lui (bits 47..32), ori (31..16) and a second
    // ori (15..0); the dsll at pc+2 needs no patching.
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address.
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);  // Targets must stay 4-byte aligned.

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend 28-bit offset to 32-bit.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;  // Keep only the in-region bits of the target.
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check markings whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
3136
3137
// Grows the code buffer: allocates a larger buffer (double up to 1 MB, then
// +1 MB steps), copies instructions (at the front) and relocation info (at
// the back), repositions all writers, and re-relocates internal references.
// Fatal if the buffer is externally owned and cannot be grown.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  // Reloc info grows downward from the end of the buffer.
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
3184
3185
// Emits one raw byte of data into the instruction stream.
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();  // NOTE(review): presumably flushes a pending forbidden slot first — confirm.
  EmitHelper(data);
}
3190
3191
// Emits one raw 32-bit word of data into the instruction stream.
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3196
3197
// Emits one raw 64-bit word of data into the instruction stream.
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3202
3203
// Emits the 64-bit address of `label` as data, recording an internal
// reference so it is fixed up when the buffer moves. For an unbound label a
// jump-chain address is emitted and its position remembered for later fixup.
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3216
3217
// Records relocation info for the instruction at the current pc. External
// references are skipped unless serializing or emitting debug code;
// CODE_TARGET_WITH_ID entries additionally carry (and then clear) the
// recorded AST id.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3246
3247
// Blocks trampoline-pool emission for the next `instructions` instructions,
// first giving the pool a chance to be emitted now while it is still legal.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
3252
3253
// Emits the trampoline pool (one j + nop slot per unbound label) if emission
// is not currently blocked. The pool is emitted at most once; afterwards
// next_buffer_check_ is pushed to kMaxInt so no further pool is attempted.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Skip over the pool in the normal fall-through path.
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          j(&after_pool);
        }
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3312
3313
// Reads back the 48-bit target address encoded by the 4-instruction li
// sequence at `pc` (lui / ori / dsll / ori; the dsll at slot 2 carries no
// immediate). The result is sign-extended to a canonical 64-bit address.
Address Assembler::target_address_at(Address pc) {
  Instr instr0 = instr_at(pc);
  Instr instr1 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 3 * kInstrSize);

  // Interpret 4 instructions for address generated by li: See listing in
  // Assembler::set_target_address_at() just below.
  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
      (GetOpcodeField(instr3) == ORI)) {
    // Assemble the 48 bit value.
    int64_t addr = static_cast<int64_t>(
        ((uint64_t)(GetImmediate16(instr0)) << 32) |
        ((uint64_t)(GetImmediate16(instr1)) << 16) |
        ((uint64_t)(GetImmediate16(instr3))));

    // Sign extend to get canonical address.
    addr = (addr << 16) >> 16;
    return reinterpret_cast<Address>(addr);
  }
  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}
3337
3338
3339// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
3340// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
3341// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// std::numeric_limits<double>::quiet_NaN() (used below) is a qNaN.
// Overwrites the HeapNumber's value with this platform's quiet NaN (see the
// qNaN/sNaN encoding note above).
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
}
3346
3347
3348// On Mips64, a target address is stored in a 4-instruction sequence:
3349// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
3350// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
3351// 2: dsll(rd, rd, 16);
3352// 3: ori(rd, rd, j.imm32_ & kImm16Mask);
3353//
3354// Patching the address must replace all the lui & ori instructions,
3355// and flush the i-cache.
3356//
3357// There is an optimization below, which emits a nop when the address
3358// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
3359// and possibly removed.
// Patches the 4-instruction li sequence at `pc` to load `target`, then
// flushes the i-cache unless SKIP_ICACHE_FLUSH is requested. Only the lui
// and the two ori immediates are rewritten; the dsll at slot 2 is untouched.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIP64 because only 48-bits of address is effectively used.
// It relies on fact the upper [63:48] bits are not used for virtual address
// translation and they have to be set according to value of bit 47 in order
// get canonical address.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);  // Destination register of the sequence.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to insure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  // dsll rt, rt, 16.
  // ori rt rt, lower-16.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
  }
}
3396
3397
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003398} // namespace internal
3399} // namespace v8
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003400
3401#endif // V8_TARGET_ARCH_MIPS64