// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// The original source code covered by the above license above has been
32// modified significantly by Google Inc.
33// Copyright 2012 the V8 project authors. All rights reserved.
34
#include "src/mips64/assembler-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/base/cpu.h"
#include "src/mips64/assembler-mips64-inl.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000041
42namespace v8 {
43namespace internal {
44
45
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
//
// Returns a bitset (1u << feature) of features guaranteed available from the
// compile-time configuration alone.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}
65
66
// Determines the set of supported CPU features, combining compile-time
// implications with (for native builds) runtime probing via base::CPU.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#endif
}
84
85
86void CpuFeatures::PrintTarget() { }
87void CpuFeatures::PrintFeatures() { }
88
89
90int ToNumber(Register reg) {
91 DCHECK(reg.is_valid());
92 const int kNumbers[] = {
93 0, // zero_reg
94 1, // at
95 2, // v0
96 3, // v1
97 4, // a0
98 5, // a1
99 6, // a2
100 7, // a3
101 8, // a4
102 9, // a5
103 10, // a6
104 11, // a7
105 12, // t0
106 13, // t1
107 14, // t2
108 15, // t3
109 16, // s0
110 17, // s1
111 18, // s2
112 19, // s3
113 20, // s4
114 21, // s5
115 22, // s6
116 23, // s7
117 24, // t8
118 25, // t9
119 26, // k0
120 27, // k1
121 28, // gp
122 29, // sp
123 30, // fp
124 31, // ra
125 };
126 return kNumbers[reg.code()];
127}
128
129
130Register ToRegister(int num) {
131 DCHECK(num >= 0 && num < kNumRegisters);
132 const Register kRegisters[] = {
133 zero_reg,
134 at,
135 v0, v1,
136 a0, a1, a2, a3, a4, a5, a6, a7,
137 t0, t1, t2, t3,
138 s0, s1, s2, s3, s4, s5, s6, s7,
139 t8, t9,
140 k0, k1,
141 gp,
142 sp,
143 fp,
144 ra
145 };
146 return kRegisters[num];
147}
148
149
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

// Relocation modes whose targets must be re-resolved when code moves:
// code targets plus both forms of internal (pc-region) references.
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000156
157
158bool RelocInfo::IsCodedSpecially() {
159 // The deserializer needs to know whether a pointer is specially coded. Being
160 // specially coded on MIPS means that it is a lui/ori instruction, and that is
161 // always the case inside code objects.
162 return true;
163}
164
165
166bool RelocInfo::IsInConstantPool() {
167 return false;
168}
169
// Returns the absolute wasm memory address encoded at this reloc site.
// Only valid when rmode_ is a wasm memory reference.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}
174
// Returns the wasm memory size encoded at this reloc site. The size is
// stored in the (pointer-width) target address slot and truncated to 32 bits.
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
}
180
// Rewrites a wasm memory reference (absolute address) or memory size
// reference in place when the backing store is relocated/resized.
// DCHECKs verify the old value lay within the old bounds and the new value
// lies within the new bounds.
void RelocInfo::update_wasm_memory_reference(
    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
    ICacheFlushMode icache_flush_mode) {
  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
  if (IsWasmMemoryReference(rmode_)) {
    // Rebase the absolute address, preserving its offset into the memory.
    Address updated_memory_reference;
    DCHECK(old_base <= wasm_memory_reference() &&
           wasm_memory_reference() < old_base + old_size);
    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
    DCHECK(new_base <= updated_memory_reference &&
           updated_memory_reference < new_base + new_size);
    Assembler::set_target_address_at(
        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
  } else if (IsWasmMemorySizeReference(rmode_)) {
    // Adjust the stored size by the same delta the memory grew/shrank by.
    uint32_t updated_size_reference;
    DCHECK(wasm_memory_size_reference() <= old_size);
    updated_size_reference =
        new_size + (wasm_memory_size_reference() - old_size);
    DCHECK(updated_size_reference <= new_size);
    Assembler::set_target_address_at(
        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
        icache_flush_mode);
  } else {
    UNREACHABLE();
  }
}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000207
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000208// -----------------------------------------------------------------------------
209// Implementation of Operand and MemOperand.
210// See assembler-mips-inl.h for inlined constructors.
211
// Builds an immediate Operand from a handle. Heap objects are embedded via
// the handle location with EMBEDDED_OBJECT relocation; smis and other
// non-heap values are embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm64_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm64_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}
227
228
// Base-register plus signed 32-bit byte offset addressing mode.
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}
232
233
// Base-register plus scaled offset: offset = unit * multiplier + addend.
// Useful for indexing fixed-size slots (e.g. stack arguments).
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend)
    : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}
239
240
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// Bit 15 set in a 16-bit immediate marks a negative offset.
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                              (Register::kCode_sp << kRtShift) |
                              (kPointerSize & kImm16Mask);  // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
                               (Register::kCode_sp << kRtShift) |
                               (-kPointerSize & kImm16Mask);  // NOLINT
// sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
    SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT
// ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
    LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, 0)) — non-negative offset form.
const Instr kLwRegFpOffsetPattern =
    LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, 0)) — non-negative offset form.
const Instr kSwRegFpOffsetPattern =
    SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask);  // NOLINT

// lw(r, MemOperand(fp, negative offset)).
const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT

// sw(r, MemOperand(fp, negative offset)).
const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
                                       (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
// Opcode + rs field: identifies the instruction kind for lw/sw matching.
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
277
278
// Constructs an assembler writing into |buffer| (owned by the caller; if
// nullptr, AssemblerBase allocates one of |buffer_size|). Relocation info is
// written backwards from the end of the buffer toward the code.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
301
302
// Finalizes code generation and fills in |desc| with the buffer layout:
// instructions grow from the start, reloc info shrinks from the end.
void Assembler::GetCode(CodeDesc* desc) {
  EmitForbiddenSlotInstruction();
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
  desc->origin = this;
  desc->constant_pool_size = 0;  // MIPS64 emits no constant pool.
}
315
316
317void Assembler::Align(int m) {
318 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000319 EmitForbiddenSlotInstruction();
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000320 while ((pc_offset() & (m - 1)) != 0) {
321 nop();
322 }
323}
324
325
326void Assembler::CodeTargetAlign() {
327 // No advantage to aligning branch/call targets to more than
328 // single instruction, that I am aware of.
329 Align(4);
330}
331
332
// Decodes the rt register operand of |instr|.
Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


// Decodes the rs register operand of |instr|.
Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


// Decodes the rd register operand of |instr|.
Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}
352
353
// Instruction field extraction helpers. Each Get<X>() returns the field
// shifted down to bit 0; each Get<X>Field() returns the field still in place
// within the instruction word (useful for direct comparison against opcodes).

uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


// Returns the 16-bit immediate field.
uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


// Returns everything except the 16-bit immediate; zero for an emitted
// label constant (see IsEmittedConstant).
uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}
417
418
// Matches ld(r, MemOperand(sp, 0)) with any rt — the Pop() pattern.
bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


// Matches sd(r, MemOperand(sp, 0)) with any rt — the Push() pattern.
bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


// Matches sw with fp base (non-negative offset form).
bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


// Matches lw with fp base (non-negative offset form).
bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


// Matches sw with fp base and a negative offset (sign bit of imm16 set).
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


// Matches lw with fp base and a negative offset (sign bit of imm16 set).
bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
449
450
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction 16-bit offset field addresses 32-bit words, but in
// code is conv to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
469
470
// Returns true if |instr| is any conditional or compact branch, including
// coprocessor (FPU) branches and, on r6, the compact POP10/POP30/BC/BALC/
// BEQZC/BNEZC encodings.
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  bool isBranch =
      opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
      opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
  if (!isBranch && kArchVariant == kMips64r6) {
    // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
    // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
    isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
                opcode == BALC ||
                (opcode == POP66 && rs_field != 0) ||  // BEQZC
                (opcode == POP76 && rs_field != 0);    // BNEZC
  }
  return isBranch;
}
494
495
// Checks if the instruction is a compact unconditional branch (BC or BALC).
bool Assembler::IsBc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == BC || opcode == BALC;
}


// Checks if the instruction is a compact zero-compare branch (BEQZC/BNEZC).
// rs must be non-zero; rs == 0 selects a different instruction in POP66/76.
bool Assembler::IsBzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return (opcode == POP66 && GetRsField(instr) != 0) ||
         (opcode == POP76 && GetRsField(instr) != 0);
}


// An emitted label constant occupies a full instruction slot whose upper
// bits are all zero (see GetLabelConst); used by the reg-exp engine.
bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsBeqzc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP66 && GetRsField(instr) != 0;
}


bool Assembler::IsBnezc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  return opcode == POP76 && GetRsField(instr) != 0;
}


// BEQC is the POP10 encoding with 0 < rs < rt (other register orderings
// select BOVC/BEQZALC in the same opcode space).
bool Assembler::IsBeqc(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP10 && rs != 0 && rs < rt;  // && rt != 0
}


// BNEC is the POP30 encoding with 0 < rs < rt (other register orderings
// select BNVC/BNEZALC in the same opcode space).
bool Assembler::IsBnec(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rs = GetRsField(instr);
  uint32_t rt = GetRtField(instr);
  return opcode == POP30 && rs != 0 && rs < rt;  // && rt != 0
}
553
554
// Returns true for absolute jumps: J, JAL, and the register forms JR/JALR
// (SPECIAL opcode with the expected zero fields).
bool Assembler::IsJump(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
         (opcode == SPECIAL && rt_field == 0 &&
          ((function_field == JALR) ||
           (rd_field == 0 && (function_field == JR))));
}
565
566
bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (ORI).
  return opcode == ORI;
}
601
602
// Returns true if |instr| is a (possibly typed) nop. Type 0 is the canonical
// sll(zero_reg, zero_reg, 0); non-zero types use at as rt and |type| as the
// shift amount so marker nops never collide with ssnop/ehb encodings.
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
625
626
// Returns the branch displacement in bytes: the sign-extended 16-bit
// immediate scaled by the 4-byte instruction size.
int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
631
632
bool Assembler::IsLw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}


// Returns the (untruncated) 16-bit offset field of an lw instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


// Returns a copy of the lw instruction with its offset replaced by |offset|;
// the base (rs) and destination (rt) registers are preserved.
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}


// Returns a copy of the sw instruction with its offset replaced by |offset|.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


// True for both 32-bit (ADDIU) and 64-bit (DADDIU) add-immediate forms.
bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}


// Returns a copy of the add-immediate instruction with a new offset.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
680
681
// Returns the width of the branch-offset field for |instr|: 26 bits for the
// r6 compact BC/BALC, 21 bits for r6 BEQZC/BNEZC, 16 bits otherwise.
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
  if (kArchVariant == kMips64r6) {
    if (Assembler::IsBc(instr)) {
      return Assembler::OffsetSize::kOffset26;
    } else if (Assembler::IsBzc(instr)) {
      return Assembler::OffsetSize::kOffset21;
    }
  }
  return Assembler::OffsetSize::kOffset16;
}
692
693
// Decodes the branch displacement of |instr| (sign-extending via a left then
// arithmetic right shift) and returns the absolute target position relative
// to |pos|, or the kEndOfChain sentinel for the end of a label link chain.
static inline int32_t AddBranchOffset(int pos, Instr instr) {
  int bits = OffsetSizeInBits(instr);
  const int32_t mask = (1 << bits) - 1;
  bits = 32 - bits;

  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm = ((instr & mask) << bits) >> (bits - 2);

  if (imm == kEndOfChain) {
    // EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + Assembler::kBranchPCOffset + imm;
  }
}
710
711
// Returns the position of the next link in a label's link chain starting at
// |pos|, or kEndOfChain when the chain ends. Handles four encodings: raw
// 64-bit internal references, emitted label constants, branches, and
// lui/ori address sequences; otherwise j/jal.
int Assembler::target_at(int pos, bool is_internal) {
  if (is_internal) {
    // Internal reference: a full 64-bit address stored in the code stream.
    int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
    int64_t address = *p;
    if (address == kEndOfJumpChain) {
      return kEndOfChain;
    } else {
      int64_t instr_address = reinterpret_cast<int64_t>(p);
      DCHECK(instr_address - address < INT_MAX);
      int delta = static_cast<int>(instr_address - address);
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    return AddBranchOffset(pos, instr);
  } else if (IsLui(instr)) {
    // lui/ori/.../ori sequence holding a 48-bit address (note the gap:
    // instruction 2 of the sequence is not an ori).
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    // TODO(plind) create named constants for shift values.
    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
    // Sign extend address;
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
      DCHECK(instr_address - imm < INT_MAX);
      int delta = static_cast<int>(instr_address - imm);
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    DCHECK(IsJ(instr) || IsJal(instr));
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      // Sign extend 28-bit offset.
      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
      return pos + delta;
    }
  }
}
779
780
// Re-encodes |instr| so its branch displacement points from |pos| to
// |target_pos|. The offset must be word-aligned and fit the instruction's
// offset field width (16/21/26 bits depending on encoding).
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
                                    Instr instr) {
  int32_t bits = OffsetSizeInBits(instr);
  int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
  DCHECK((imm & 3) == 0);
  imm >>= 2;

  const int32_t mask = (1 << bits) - 1;
  instr &= ~mask;
  DCHECK(is_intn(imm, bits));

  return instr | (imm & mask);
}
794
795
// Patches the link/jump at |pos| to refer to |target_pos|. Mirrors the
// encodings handled by target_at(): internal references, emitted label
// constants, branches, lui/ori address sequences, and j/jal (either as
// unbound raw-marked offsets or by rewriting the offset field in place).
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
  if (is_internal) {
    // Store the absolute target address directly in the code stream.
    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    *reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
    return;
  }
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  if (IsBranch(instr)) {
    instr = SetBranchOffset(pos, target_pos, instr);
    instr_at_put(pos, instr);
  } else if (IsLui(instr)) {
    // Rewrite the 48-bit address spread over the lui/ori/.../ori sequence.
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));

    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | ((imm >> 16) & kImm16Mask));
    instr_at_put(pos + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
  } else if (IsJ(instr) || IsJal(instr)) {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place 26-bit signed offset with markings.
    // When code is committed it will be resolved to j/jal.
    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
    instr_at_put(pos, mark | (imm26 & kImm26Mask));
  } else {
    int32_t imm28 = target_pos - pos;
    DCHECK((imm28 & 3) == 0);

    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    DCHECK(is_uint26(imm26));
    // Place raw 26-bit signed offset.
    // When code is committed it will be resolved to j/jal.
    instr &= ~kImm26Mask;
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
856
857
// Debug helper: prints a label's state; for a linked label, walks and prints
// every position in its link chain (on a local copy, so |L| is unchanged).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l, internal_reference_positions_.find(l.pos()) !=
                   internal_reference_positions_.end());
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
881
882
// Binds label |L| to position |pos|, patching every instruction in its link
// chain. Branches whose displacement cannot reach |pos| are routed through a
// trampoline slot; internal references get the absolute address instead.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int trampoline_pos = kInvalidSlotPos;
  bool is_internal = false;
  if (L->is_linked() && !trampoline_emitted_) {
    // One fewer unbound label: relax the trampoline-pool pressure check.
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int dist = pos - fixup_pos;
    is_internal = internal_reference_positions_.find(fixup_pos) !=
                  internal_reference_positions_.end();
    next(L, is_internal);  // Call next before overwriting link with target at
                           // fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (is_internal) {
      target_at_put(fixup_pos, pos, is_internal);
    } else {
      if (IsBranch(instr)) {
        int branch_offset = BranchOffset(instr);
        if (dist > branch_offset) {
          // Target is out of the branch's range: go via a trampoline slot
          // (allocated once and reused for all out-of-range links).
          if (trampoline_pos == kInvalidSlotPos) {
            trampoline_pos = get_trampoline_entry(fixup_pos);
            CHECK(trampoline_pos != kInvalidSlotPos);
          }
          CHECK((trampoline_pos - fixup_pos) <= branch_offset);
          target_at_put(fixup_pos, trampoline_pos, false);
          fixup_pos = trampoline_pos;
          dist = pos - fixup_pos;
        }
        target_at_put(fixup_pos, pos, false);
      } else {
        DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
               IsEmittedConstant(instr));
        target_at_put(fixup_pos, pos, false);
      }
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
930
931
// Binds L to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
936
937
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000938void Assembler::next(Label* L, bool is_internal) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000939 DCHECK(L->is_linked());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000940 int link = target_at(L->pos(), is_internal);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000941 if (link == kEndOfChain) {
942 L->Unuse();
943 } else {
944 DCHECK(link >= 0);
945 L->link_to(link);
946 }
947}
948
949
// Returns true if the bound label L is within range of a pre-R6 branch
// emitted at the current position (with slack for a few helper instructions).
bool Assembler::is_near(Label* L) {
  DCHECK(L->is_bound());
  return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
954
955
956bool Assembler::is_near(Label* L, OffsetSize bits) {
957 if (L == nullptr || !L->is_bound()) return true;
958 return ((pc_offset() - L->pos()) <
959 (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
960}
961
962
// Dispatches the nearness check to the R6 or pre-R6 variant.
bool Assembler::is_near_branch(Label* L) {
  DCHECK(L->is_bound());
  return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
967
968
969int Assembler::BranchOffset(Instr instr) {
970 // At pre-R6 and for other R6 branches the offset is 16 bits.
971 int bits = OffsetSize::kOffset16;
972
973 if (kArchVariant == kMips64r6) {
974 uint32_t opcode = GetOpcodeField(instr);
975 switch (opcode) {
976 // Checks BC or BALC.
977 case BC:
978 case BALC:
979 bits = OffsetSize::kOffset26;
980 break;
981
982 // Checks BEQZC or BNEZC.
983 case POP66:
984 case POP76:
985 if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
986 break;
987 default:
988 break;
989 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000990 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000991
992 return (1 << (bits + 2 - 1)) - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000993}
994
995
996// We have to use a temporary register for things that can be relocated even
997// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
998// space. There is no guarantee that the relocated location can be similarly
999// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Anything with a relocation mode may be patched later, so it cannot be
  // folded into an immediate field.
  return !RelocInfo::IsNone(rmode);
}
1003
// Emits an R-type instruction: opcode | rs | rt | rd | sa | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
1015
1016
// Emits an R-type instruction whose rd/sa fields carry bit-field bounds
// (msb/lsb), as used by the ins/ext instruction family.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}
1028
1029
// Emits a three-operand FPU instruction: opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}
1041
1042
// Emits a four-operand FPU instruction: opcode | fr | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1054
1055
// Emits an FPU instruction with a GPR in the rt field (e.g. moves between
// general and floating-point registers).
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}
1067
1068
// Emits an instruction accessing an FPU control register (e.g. cfc1/ctc1).
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
1079
1080
1081// Instructions with immediate value.
1082// Registers are in the order of the instruction encoding, from left to right.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001083void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
1084 int32_t j,
1085 CompactBranchType is_compact_branch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001086 DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
1087 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1088 | (j & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001089 emit(instr, is_compact_branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001090}
1091
1092
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001093void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
1094 int32_t j,
1095 CompactBranchType is_compact_branch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001096 DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
1097 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001098 emit(instr, is_compact_branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001099}
1100
1101
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001102void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
1103 int32_t j,
1104 CompactBranchType is_compact_branch) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001105 DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
1106 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
1107 | (j & kImm16Mask);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001108 emit(instr, is_compact_branch);
1109}
1110
1111
// Emits an instruction with a signed 21-bit offset: opcode | rs | offset21.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
                                  CompactBranchType is_compact_branch) {
  DCHECK(rs.is_valid() && (is_int21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr, is_compact_branch);
}
1118
1119
// Emits an instruction with an unsigned 21-bit offset field.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
                                  uint32_t offset21) {
  DCHECK(rs.is_valid() && (is_uint21(offset21)));
  Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
  emit(instr);
}
1126
1127
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001128void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
1129 CompactBranchType is_compact_branch) {
1130 DCHECK(is_int26(offset26));
1131 Instr instr = opcode | (offset26 & kImm26Mask);
1132 emit(instr, is_compact_branch);
1133}
1134
1135
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001136void Assembler::GenInstrJump(Opcode opcode,
1137 uint32_t address) {
1138 BlockTrampolinePoolScope block_trampoline_pool(this);
1139 DCHECK(is_uint26(address));
1140 Instr instr = opcode | address;
1141 emit(instr);
1142 BlockTrampolinePoolFor(1); // For associated delay slot.
1143}
1144
1145
1146// Returns the next free trampoline entry.
1147int32_t Assembler::get_trampoline_entry(int32_t pos) {
1148 int32_t trampoline_entry = kInvalidSlotPos;
1149 if (!internal_trampoline_exception_) {
1150 if (trampoline_.start() > pos) {
1151 trampoline_entry = trampoline_.take_slot();
1152 }
1153
1154 if (kInvalidSlotPos == trampoline_entry) {
1155 internal_trampoline_exception_ = true;
1156 }
1157 }
1158 return trampoline_entry;
1159}
1160
1161
1162uint64_t Assembler::jump_address(Label* L) {
1163 int64_t target_pos;
1164 if (L->is_bound()) {
1165 target_pos = L->pos();
1166 } else {
1167 if (L->is_linked()) {
1168 target_pos = L->pos(); // L's link.
1169 L->link_to(pc_offset());
1170 } else {
1171 L->link_to(pc_offset());
1172 return kEndOfJumpChain;
1173 }
1174 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001175 uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
1176 DCHECK((imm & 3) == 0);
1177
1178 return imm;
1179}
1180
1181
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001182uint64_t Assembler::jump_offset(Label* L) {
1183 int64_t target_pos;
1184 int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1185
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001186 if (L->is_bound()) {
1187 target_pos = L->pos();
1188 } else {
1189 if (L->is_linked()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001190 target_pos = L->pos(); // L's link.
1191 L->link_to(pc_offset() + pad);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001192 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001193 L->link_to(pc_offset() + pad);
1194 return kEndOfJumpChain;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001195 }
1196 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001197 int64_t imm = target_pos - (pc_offset() + pad);
1198 DCHECK((imm & 3) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001199
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001200 return static_cast<uint64_t>(imm);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001201}
1202
1203
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001204int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001205 int32_t target_pos;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001206 int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
1207
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001208 if (L->is_bound()) {
1209 target_pos = L->pos();
1210 } else {
1211 if (L->is_linked()) {
1212 target_pos = L->pos();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001213 L->link_to(pc_offset() + pad);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001214 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001215 L->link_to(pc_offset() + pad);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001216 if (!trampoline_emitted_) {
1217 unbound_labels_count_++;
1218 next_buffer_check_ -= kTrampolineSlotsSize;
1219 }
1220 return kEndOfChain;
1221 }
1222 }
1223
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001224 int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
1225 DCHECK(is_intn(offset, bits + 2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001226 DCHECK((offset & 3) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001227
1228 return offset;
1229}
1230
1231
// Stores the position of label L at buffer offset at_offset. A bound label
// is written as a code-object-relative value; an unbound label is linked by
// writing the scaled previous-link delta (or 0 at end of chain).
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);  // Link delta is instruction-aligned.
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);  // Zero marks the end of the chain.
      // First use of an unbound label: reserve room for a trampoline slot.
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1256
1257
1258//------- Branch and jump instructions --------
1259
// Unconditional branch (pseudo-instruction): beq zero_reg, zero_reg, offset.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}
1263
1264
// Branch-and-link (pseudo-instruction): bgezal with rs = zero_reg.
void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}
1269
1270
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001271void Assembler::bc(int32_t offset) {
1272 DCHECK(kArchVariant == kMips64r6);
1273 GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
1274}
1275
1276
// R6 compact branch-and-link (no delay slot), 26-bit offset.
void Assembler::balc(int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
1282
1283
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001284void Assembler::beq(Register rs, Register rt, int16_t offset) {
1285 BlockTrampolinePoolScope block_trampoline_pool(this);
1286 GenInstrImmediate(BEQ, rs, rt, offset);
1287 BlockTrampolinePoolFor(1); // For associated delay slot.
1288}
1289
1290
// Branch if rs >= 0 (REGIMM encoding, delay slot).
void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1296
1297
// R6 compact branch if rt >= 0. BGEZC re-uses the BLEZL opcode with
// rs == rt != 0.
void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1303
1304
// R6 compact branch if rs >= rt (unsigned). Re-uses the BLEZ opcode with
// rs, rt both nonzero and distinct.
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1312
1313
// R6 compact branch if rs >= rt (signed). Re-uses the BLEZL opcode with
// rs, rt both nonzero and distinct.
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1321
1322
// Branch-and-link if rs >= 0 (delay slot). On R6 only the rs == zero_reg
// form (plain bal) remains valid.
void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1330
1331
// Branch if rs > 0 (delay slot).
void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1337
1338
// R6 compact branch if rt > 0. Re-uses the BGTZL opcode with rs == 0.
void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1345
1346
// Branch if rs <= 0 (delay slot).
void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1352
1353
// R6 compact branch if rt <= 0. Re-uses the BLEZL opcode with rs == 0.
void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1360
1361
// R6 compact branch if rt < 0. BLTZC re-uses the BGTZL opcode with
// rs == rt != 0.
void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rt.is(zero_reg));
  GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1367
1368
// R6 compact branch if rs < rt (unsigned). Re-uses the BGTZ opcode with
// rs, rt both nonzero and distinct.
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1376
1377
// R6 compact branch if rs < rt (signed). Re-uses the BGTZL opcode with
// rs, rt both nonzero and distinct.
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!rs.is(zero_reg));
  DCHECK(!rt.is(zero_reg));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1385
1386
// Branch if rs < 0 (REGIMM encoding, delay slot).
void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1392
1393
// Branch-and-link if rs < 0 (delay slot). On R6 only the rs == zero_reg
// (never-taken) form remains valid.
void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1401
1402
// Branch if rs != rt (delay slot).
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1408
1409
1410void Assembler::bovc(Register rs, Register rt, int16_t offset) {
1411 DCHECK(kArchVariant == kMips64r6);
Ben Murdochda12d292016-06-02 14:46:10 +01001412 if (rs.code() >= rt.code()) {
1413 GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1414 } else {
1415 GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1416 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001417}
1418
1419
1420void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
1421 DCHECK(kArchVariant == kMips64r6);
Ben Murdochda12d292016-06-02 14:46:10 +01001422 if (rs.code() >= rt.code()) {
1423 GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1424 } else {
1425 GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1426 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001427}
1428
1429
// R6 compact branch-and-link if rt <= 0. Re-uses the BLEZ opcode with
// rs == 0, rt != 0.
void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BLEZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1437
1438
// R6 compact branch-and-link if rt >= 0. Re-uses the BLEZ opcode with
// rs == rt != 0.
void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1445
1446
// Branch-likely-and-link if rs >= 0 (pre-R6 only; removed in R6).
void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1455
1456
// R6 compact branch-and-link if rt < 0. Re-uses the BGTZ opcode with
// rs == rt != 0.
void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
1463
1464
// R6 compact branch-and-link if rt > 0. Re-uses the BGTZ opcode with
// rs == 0, rt != 0.
void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(BGTZ, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1472
1473
// R6 compact branch-and-link if rt == 0. Re-uses the ADDI opcode with
// rs == 0, rt != 0.
void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(ADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1481
1482
// R6 compact branch-and-link if rt != 0. Re-uses the DADDI opcode with
// rs == 0, rt != 0.
void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rt.is(zero_reg)));
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(DADDI, zero_reg, rt, offset,
                    CompactBranchType::COMPACT_BRANCH);
}
1490
1491
1492void Assembler::beqc(Register rs, Register rt, int16_t offset) {
1493 DCHECK(kArchVariant == kMips64r6);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001494 DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1495 if (rs.code() < rt.code()) {
1496 GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1497 } else {
1498 GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1499 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001500}
1501
1502
// R6 compact branch if rs == 0 (POP66 opcode, 21-bit offset).
void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1508
1509
1510void Assembler::bnec(Register rs, Register rt, int16_t offset) {
1511 DCHECK(kArchVariant == kMips64r6);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001512 DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
1513 if (rs.code() < rt.code()) {
1514 GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
1515 } else {
1516 GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
1517 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001518}
1519
1520
// R6 compact branch if rs != 0 (POP76 opcode, 21-bit offset).
void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
1526
1527
// Absolute jump; the byte target is scaled to a 26-bit instruction index.
void Assembler::j(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1533
1534
// Jump to a label. A bound label is emitted with the raw-mark opcode so the
// relative offset can be recognized and relocated later; an unbound label
// falls back to j(imm), which links it via kEndOfJumpChain.
void Assembler::j(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    GenInstrJump(static_cast<Opcode>(kJRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    j(imm);
  }
}
1546
1547
// Jump-and-link to a label; same raw-mark scheme as j(Label*).
void Assembler::jal(Label* target) {
  uint64_t imm = jump_offset(target);
  if (target->is_bound()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    positions_recorder()->WriteRecordedPositions();
    GenInstrJump(static_cast<Opcode>(kJalRawMark),
                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jal(imm);
  }
}
1560
1561
1562void Assembler::jr(Register rs) {
1563 if (kArchVariant != kMips64r6) {
1564 BlockTrampolinePoolScope block_trampoline_pool(this);
1565 if (rs.is(ra)) {
1566 positions_recorder()->WriteRecordedPositions();
1567 }
1568 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1569 BlockTrampolinePoolFor(1); // For associated delay slot.
1570 } else {
1571 jalr(rs, zero_reg);
1572 }
1573}
1574
1575
// Absolute jump-and-link; the byte target is scaled to a 26-bit index.
void Assembler::jal(int64_t target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1582
1583
// Jump to rs, storing the return address in rd; rs and rd must differ.
void Assembler::jalr(Register rs, Register rd) {
  DCHECK(rs.code() != rd.code());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1591
1592
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001593void Assembler::jic(Register rt, int16_t offset) {
1594 DCHECK(kArchVariant == kMips64r6);
1595 GenInstrImmediate(POP66, zero_reg, rt, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001596}
1597
1598
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001599void Assembler::jialc(Register rt, int16_t offset) {
1600 DCHECK(kArchVariant == kMips64r6);
1601 positions_recorder()->WriteRecordedPositions();
1602 GenInstrImmediate(POP76, zero_reg, rt, offset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001603}
1604
1605
1606// -------Data-processing-instructions---------
1607
1608// Arithmetic.
1609
// ADDU: rd = rs + rt (32-bit add, no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
1613
1614
// ADDIU: rd = rs + imm16; rd travels in the rt field of the I-type encoding.
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}
1618
1619
// SUBU: rd = rs - rt (32-bit subtract, no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
1623
1624
1625void Assembler::mul(Register rd, Register rs, Register rt) {
1626 if (kArchVariant == kMips64r6) {
1627 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1628 } else {
1629 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1630 }
1631}
1632
1633
// MUH (R6): rd = high 32 bits of signed rs * rt.
void Assembler::muh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
1638
1639
// MULU (R6): rd = low 32 bits of unsigned rs * rt.
void Assembler::mulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
1644
1645
// MUHU (R6): rd = high 32 bits of unsigned rs * rt.
void Assembler::muhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
1650
1651
// DMUL (R6): rd = low 64 bits of signed rs * rt.
void Assembler::dmul(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}
1656
1657
// DMUH (R6): rd = high 64 bits of signed rs * rt.
void Assembler::dmuh(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}
1662
1663
// DMULU (R6): rd = low 64 bits of unsigned rs * rt.
void Assembler::dmulu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
1668
1669
// DMUHU (R6): rd = high 64 bits of unsigned rs * rt.
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
1674
1675
// MULT (pre-R6): signed 32-bit multiply into HI/LO.
void Assembler::mult(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
1680
1681
// MULTU (pre-R6): unsigned 32-bit multiply into HI/LO.
void Assembler::multu(Register rs, Register rt) {
  DCHECK(kArchVariant != kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
1686
1687
// DADDIU: rd = rs + imm16 (64-bit); rd travels in the rt field.
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(DADDIU, rs, rd, j);
}
1691
1692
// DIV (pre-R6 form): signed 32-bit divide; quotient in LO, remainder in HI.
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
1696
1697
// DIV (R6 three-operand form): rd = rs / rt (signed 32-bit quotient).
void Assembler::div(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
1702
1703
// MOD (R6): rd = rs % rt (signed 32-bit remainder).
void Assembler::mod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
1708
1709
// DIVU (pre-R6 form): unsigned 32-bit divide into HI/LO.
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
1713
1714
// DIVU (R6 three-operand form): rd = rs / rt (unsigned 32-bit quotient).
void Assembler::divu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
1719
1720
// MODU (R6): rd = rs % rt (unsigned 32-bit remainder).
void Assembler::modu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
1725
1726
// DADDU: rd = rs + rt (64-bit add, no overflow trap).
void Assembler::daddu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}
1730
1731
// DSUBU: rd = rs - rt (64-bit subtract, no overflow trap).
void Assembler::dsubu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}
1735
1736
// DMULT: signed 64-bit multiply into HI/LO.
void Assembler::dmult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}
1740
1741
// DMULTU: unsigned 64-bit multiply into HI/LO.
void Assembler::dmultu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}
1745
1746
// DDIV (pre-R6 form): signed 64-bit divide into HI/LO.
void Assembler::ddiv(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}
1750
1751
// DDIV (R6 three-operand form): rd = rs / rt (signed 64-bit quotient).
void Assembler::ddiv(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}
1756
1757
// DMOD (R6): rd = rs % rt (signed 64-bit remainder).
void Assembler::dmod(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
1762
1763
// DDIVU (pre-R6 form): unsigned 64-bit divide into HI/LO.
void Assembler::ddivu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}
1767
1768
// DDIVU (R6 three-operand form): rd = rs / rt (unsigned 64-bit quotient).
void Assembler::ddivu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}
1773
1774
// DMODU (R6): rd = rs % rt (unsigned 64-bit remainder).
void Assembler::dmodu(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
1779
1780
1781// Logical.
1782
// AND: rd = rs & rt.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
1786
1787
// ANDI: rt = rs & zero-extended imm16.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}
1792
1793
// OR: rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
1797
1798
// ORI: rt = rs | zero-extended imm16.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}
1803
1804
// XOR: rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
1808
1809
// XORI: rt = rs ^ zero-extended imm16.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}
1814
1815
1816void Assembler::nor(Register rd, Register rs, Register rt) {
1817 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1818}
1819
1820
// Shifts.

// Shift word left logical by immediate: rd = rt << sa (result sign-extended
// to 64 bits by hardware, as for all 32-bit ops on mips64).
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);  // 5-bit sa.
}

// Shift word left logical by register: rd = rt << (rs & 31).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}

// Shift word right logical by immediate: rd = rt >> sa (zero fill).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}

// Shift word right logical by register: rd = rt >> (rs & 31).
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}

// Shift word right arithmetic by immediate: rd = rt >> sa (sign fill).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}

// Shift word right arithmetic by register: rd = rt >> (rs & 31).
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}

// Rotate word right by immediate. ROTR is the SRL encoding with bit 21
// (the low bit of the rs field) set, hence the hand-built instruction word.
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}

// Rotate word right by register. ROTRV is the SRLV encoding with bit 6
// (the low bit of the sa field) set.
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}

// Doubleword shift left logical by immediate (sa in 0..31): rd = rt << sa.
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}

// Doubleword shift left logical by register: rd = rt << (rs & 63).
void Assembler::dsllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}

// Doubleword shift right logical by immediate (sa in 0..31).
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}

// Doubleword shift right logical by register: rd = rt >> (rs & 63).
void Assembler::dsrlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}

// Doubleword rotate right by immediate (sa in 0..31). DROTR is the DSRL
// encoding with bit 21 set.
void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
  emit(instr);
}

// Doubleword rotate right by (sa + 32). DROTR32 is the DSRL32 encoding with
// bit 21 set.
void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
                (rd.code() << kRdShift) | (sa << kSaShift) | DSRL32;
  emit(instr);
}

// Doubleword rotate right by register. DROTRV is the DSRLV encoding with
// bit 6 set.
void Assembler::drotrv(Register rd, Register rt, Register rs) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
  emit(instr);
}

// Doubleword shift right arithmetic by immediate (sa in 0..31).
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}

// Doubleword shift right arithmetic by register: rd = rt >> (rs & 63).
void Assembler::dsrav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}

// Doubleword shift left logical by (sa + 32).
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}

// Doubleword shift right logical by (sa + 32).
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}

// Doubleword shift right arithmetic by (sa + 32).
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}

// r6-only left-shift-add (32-bit): rd = (rs << (sa + 1)) + rt.
// sa holds the 2-bit encoded shift field.
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | LSA;
  emit(instr);
}

// r6-only doubleword left-shift-add: rd = (rs << (sa + 1)) + rt.
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
  DCHECK(sa <= 3);
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
                rd.code() << kRdShift | sa << kSaShift | DLSA;
  emit(instr);
}
1965
1966
// ------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
// Materializes (base + 32-bit offset) into the scratch register 'at':
// the upper 16 bits go in via daddiu+dsll (a lui equivalent that keeps the
// value sign-extended), the lower 16 via ori, then the base is added.
// Clobbers 'at'; the base register must therefore not be 'at'.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  DCHECK(is_int32(src.offset_));
  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
  dsll(at, at, kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  daddu(at, at, src.rm());  // Add base register.
}

// Load byte (sign-extended).
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}

// Load byte unsigned (zero-extended).
void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}

// Load halfword (sign-extended).
void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}

// Load halfword unsigned (zero-extended).
void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}

// Load word (sign-extended to 64 bits).
void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}

// Load word unsigned (zero-extended to 64 bits).
void Assembler::lwu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
  }
}

// Load word left (unaligned access helper). Removed in r6, hence the
// r2-only check; no large-offset fallback is provided.
void Assembler::lwl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}

// Load word right (unaligned access helper, r2 only).
void Assembler::lwr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}

// Store byte.
void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}

// Store halfword.
void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}

// Store word.
void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}

// Store word left (unaligned access helper, r2 only).
void Assembler::swl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}

// Store word right (unaligned access helper, r2 only).
void Assembler::swr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}

// Load upper immediate: rd = j << 16 (sign-extended to 64 bits).
void Assembler::lui(Register rd, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}

// Add upper immediate: rt = rs + (j << 16), 32-bit arithmetic.
void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses same opcode as 'lui'. The difference in encoding is
  // 'lui' has zero reg. for rs field.
  DCHECK(is_uint16(j));
  GenInstrImmediate(LUI, rs, rt, j);
}

// Doubleword add upper immediate: rt = rs + (j << 16), 64-bit arithmetic.
// rs == zero_reg is reserved (would alias another encoding), hence the check.
void Assembler::daui(Register rt, Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  DCHECK(!rs.is(zero_reg));
  GenInstrImmediate(DAUI, rs, rt, j);
}

// Doubleword add higher immediate: rs = rs + (j << 32).
void Assembler::dahi(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DAHI, j);
}

// Doubleword add top immediate: rs = rs + (j << 48).
void Assembler::dati(Register rs, int32_t j) {
  DCHECK(is_uint16(j));
  GenInstrImmediate(REGIMM, rs, DATI, j);
}

// Load doubleword left (unaligned access helper, r2 only).
void Assembler::ldl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}

// Load doubleword right (unaligned access helper, r2 only).
void Assembler::ldr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}

// Store doubleword left (unaligned access helper, r2 only).
void Assembler::sdl(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}

// Store doubleword right (unaligned access helper, r2 only).
void Assembler::sdr(Register rd, const MemOperand& rs) {
  DCHECK(is_int16(rs.offset_));
  DCHECK(kArchVariant == kMips64r2);
  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}

// Load doubleword.
void Assembler::ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LD, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}

// Store doubleword.
void Assembler::sd(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}
2177
2178
// ---------PC-Relative instructions-----------
// All r6-only. Each packs a sub-opcode plus a signed immediate into the
// 21-bit field of the PCREL major opcode.

// rs = PC + (imm19 << 2).
void Assembler::addiupc(Register rs, int32_t imm19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(imm19));
  uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// PC-relative load word (sign-extended): rs = mem32[PC + (offset19 << 2)].
void Assembler::lwpc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// PC-relative load word unsigned (zero-extended).
void Assembler::lwupc(Register rs, int32_t offset19) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int19(offset19));
  uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// PC-relative load doubleword; offset field is 18 bits (scaled by 8).
void Assembler::ldpc(Register rs, int32_t offset18) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid() && is_int18(offset18));
  uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// Add upper immediate to PC: rs = PC + (imm16 << 16).
void Assembler::auipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}

// Aligned add upper immediate to PC: rs = (PC + (imm16 << 16)) & ~0xffff.
void Assembler::aluipc(Register rs, int16_t imm16) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(rs.is_valid());
  uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
  GenInstrImmediate(PCREL, rs, imm21);
}
2227
2228
// -------------Misc-instructions--------------

// Break / Trap instructions.

// Emit a breakpoint with a 20-bit code placed in the instruction's code
// field (bits 6..25).
void Assembler::break_(uint32_t code, bool break_as_stop) {
  DCHECK((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  DCHECK((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}

// Emit a stop. On real hardware this is a plain break; under the simulator
// the break is followed by the message pointer, which the simulator reads.
void Assembler::stop(const char* msg, uint32_t code) {
  DCHECK(code > kMaxWatchpointCode);
  DCHECK(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  // Block the trampoline pool so it cannot be emitted between the break and
  // the inlined message pointer (which is not an instruction).
  BlockTrampolinePoolFor(3);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<uint64_t>(msg));
#endif
}

// Trap if rs >= rt (signed); 10-bit software code in bits 6..15.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Trap if rs >= rt (unsigned).
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Trap if rs < rt (signed).
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Trap if rs < rt (unsigned).
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Trap if rs == rt.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Trap if rs != rt.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  DCHECK(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}

// Memory barrier: order all preceding loads/stores before subsequent ones.
void Assembler::sync() {
  Instr sync_instr = SPECIAL | SYNC;
  emit(sync_instr);
}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002315
// Move from HI/LO register.

// rd = HI (pre-r6 multiply/divide result register).
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}

// rd = LO (pre-r6 multiply/divide result register).
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}

// Set on less than instructions.

// rd = (rs < rt) ? 1 : 0, signed compare.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}

// rd = (rs < rt) ? 1 : 0, unsigned compare.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}

// rt = (rs < j) ? 1 : 0, signed compare with sign-extended immediate.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}

// rt = (rs < j) ? 1 : 0, unsigned compare with sign-extended immediate.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}

// Conditional move.

// rd = rs if rt == 0 (pre-r6).
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}

// rd = rs if rt != 0 (pre-r6).
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}

// rd = rs if FP condition code cc is true. The rt field encodes
// (cc << 2) | tf, with tf = 1 selecting the "true" variant.
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}

// rd = rs if FP condition code cc is false (tf = 0 in the rt field).
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
2372
2373
// r6 floating-point min/max family. The *_s/*_d wrappers pick the format;
// the mina/maxa variants select by magnitude (absolute value).

// fd = min(fs, ft), single precision.
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(S, fd, fs, ft);
}

// fd = min(fs, ft), double precision.
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  min(D, fd, fs, ft);
}

// fd = max(fs, ft), single precision.
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(S, fd, fs, ft);
}

// fd = max(fs, ft), double precision.
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  max(D, fd, fs, ft);
}

// fd = the operand with the smaller absolute value, single precision.
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(S, fd, fs, ft);
}

// fd = the operand with the smaller absolute value, double precision.
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  mina(D, fd, fs, ft);
}

// fd = the operand with the larger absolute value, single precision.
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(S, fd, fs, ft);
}

// fd = the operand with the larger absolute value, double precision.
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  maxa(D, fd, fs, ft);
}

// Format-parameterized MAX encoding (r6 only; S or D format).
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}

// Format-parameterized MIN encoding (r6 only; S or D format).
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
                    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
2428
2429
// GPR.
// r6 register select: rd = (rt == 0) ? rs : 0.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}

// GPR.
// r6 register select: rd = (rt != 0) ? rs : 0.
void Assembler::selnez(Register rd, Register rs, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}

// Bit twiddling.

// Count leading zeros in the 32-bit value of rs.
void Assembler::clz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
  }
}

// Count leading zeros in the 64-bit value of rs.
void Assembler::dclz(Register rd, Register rs) {
  if (kArchVariant != kMips64r6) {
    // dclz instr requires same GPR number in 'rd' and 'rt' fields.
    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
  } else {
    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
  }
}

// Insert bitfield: rt[pos+size-1 : pos] = rs[size-1 : 0].
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}

// Doubleword insert bitfield (pos + size <= 32 for this encoding).
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dins.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}

// Extract bitfield: rt = rs[pos+size-1 : pos], zero-extended.
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}

// Doubleword extract bitfield (pos < 32, size <= 32 for this encoding).
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dext.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}

// Doubleword extract for sizes 33..64: the msbd field stores size-1-32.
void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextm.
  // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}

// Doubleword extract for positions 32..63: the lsb field stores pos-32.
void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Dextu.
  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}

// r6-only: reverse the bits within each byte of the 32-bit value.
void Assembler::bitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}

// r6-only: reverse the bits within each byte of the 64-bit value.
void Assembler::dbitswap(Register rd, Register rt) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}

// Prefetch with a 5-bit hint; offset must fit the 16-bit field (no
// large-offset fallback here).
void Assembler::pref(int32_t hint, const MemOperand& rs) {
  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
      | (rs.offset_);
  emit(instr);
}

// r6-only byte-align: concatenate rs:rt and extract a word at byte
// position bp (0..3 packed below the ALIGN sub-opcode in the sa field).
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (ALIGN << kBp2Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}

// r6-only doubleword byte-align (byte position bp, 0..7).
void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK(is_uint3(bp));
  uint16_t sa = (DALIGN << kBp3Bits) | bp;
  GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}
2547
2548
// --------Coprocessor-instructions----------------

// Load, store, move.

// Load single-precision FP value; falls back to materializing the address
// in 'at' when the offset does not fit the 16-bit field.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LWC1, at, fd, 0);
  }
}

// Load double-precision FP value (large-offset fallback via 'at').
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(LDC1, at, fd, 0);
  }
}

// Store single-precision FP value (large-offset fallback via 'at').
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SWC1, at, fd, 0);
  }
}

// Store double-precision FP value. The base must not be 'at' because the
// large-offset path clobbers 'at'.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  DCHECK(!src.rm().is(at));
  if (is_int16(src.offset_)) {
    GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(src);
    GenInstrImmediate(SDC1, at, fd, 0);
  }
}

// Move word from GPR to low 32 bits of FPU register.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}

// Move word from GPR to high 32 bits of FPU register.
void Assembler::mthc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}

// Move doubleword from GPR to FPU register.
void Assembler::dmtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}

// Move low 32 bits of FPU register to GPR.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}

// Move high 32 bits of FPU register to GPR.
void Assembler::mfhc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}

// Move doubleword from FPU register to GPR.
void Assembler::dmfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}

// Move GPR to FPU control register (e.g. FCSR).
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}

// Move FPU control register to GPR.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}

// Bit-copy a double into its low/high 32-bit halves (via memcpy to avoid
// strict-aliasing issues).
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}
2640
2641
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002642void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
2643 FPURegister ft) {
2644 DCHECK(kArchVariant == kMips64r6);
2645 DCHECK((fmt == D) || (fmt == S));
2646
2647 GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
2648}
2649
2650
2651void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2652 sel(S, fd, fs, ft);
2653}
2654
2655
2656void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2657 sel(D, fd, fs, ft);
2658}
2659
2660
2661// FPR.
2662void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
2663 FPURegister ft) {
2664 DCHECK((fmt == D) || (fmt == S));
2665 GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
2666}
2667
2668
2669void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2670 seleqz(D, fd, fs, ft);
2671}
2672
2673
2674void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2675 seleqz(S, fd, fs, ft);
2676}
2677
2678
2679void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2680 selnez(D, fd, fs, ft);
2681}
2682
2683
2684void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
2685 selnez(S, fd, fs, ft);
2686}
2687
2688
// Conditional FP moves. These encodings exist only pre-r6 (kMips64r2);
// release 6 replaced them with the sel/seleqz/selnez family above.

// movz.s: conditional move keyed on GPR rt (rt occupies the ft field).
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}


// movz.d: double-precision variant of movz_s.
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}


// movt.s: conditional move keyed on FP condition flag cc.
// The condition code and the true/false sense share the ft register field:
// bits [4:2] hold cc (masked to 3 bits), bit 0 = 1 selects the "true" sense.
// MOVT and MOVF share the MOVF function code; only bit 0 differs.
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// movt.d: double-precision variant of movt_s.
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// movf.s: like movt.s but bit 0 = 0 selects the "false" sense of flag cc.
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}


// movf.d: double-precision variant of movf_s.
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
  DCHECK(kArchVariant == kMips64r2);
  FPURegister ft;
  ft.reg_code = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}


// movn.s: conditional move keyed on GPR rt (complement of movz.s).
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}


// movn.d: double-precision variant of movn_s.
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
  DCHECK(kArchVariant == kMips64r2);
  GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
2743
2744
// FPR.
// FP select-if-nonzero (MIPS64r6 only): emits SELNEZ.fmt, the complement of
// seleqz(). fmt must be S or D.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
                       FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
2752
2753
// Arithmetic.
// Note: the *_D secondary-field constants (ADD_D, SUB_D, MUL_D, ...) are
// reused for both precisions -- the fmt argument (S or D) selects single
// versus double; the function field itself is format-independent.

// add.s: single-precision FP add; fd from fs and ft.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}


// add.d: double-precision FP add.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


// sub.s: single-precision FP subtract.
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}


// sub.d: double-precision FP subtract.
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


// mul.s: single-precision FP multiply.
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}


// mul.d: double-precision FP multiply.
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


// madd.d: multiply-add via the COP1X encoding; fr supplies the addend
// operand field. NOTE(review): presumably fd = fs*ft + fr -- confirm
// against the pre-r6 MADD.D definition.
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
2790
2791
// div.s: single-precision FP divide.
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}


// div.d: double-precision FP divide.
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


// abs.s: single-precision FP absolute value (unary: ft field is f0).
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}


// abs.d: double-precision FP absolute value.
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
2810
2811
// mov.d: double-precision FP register-to-register move.
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


// mov.s: single-precision FP register-to-register move.
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}


// neg.s: single-precision FP negate.
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}


// neg.d: double-precision FP negate.
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
2830
2831
// sqrt.s: single-precision FP square root.
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}


// sqrt.d: double-precision FP square root.
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// rsqrt.s: single-precision reciprocal square root approximation.
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}


// rsqrt.d: double-precision reciprocal square root approximation.
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}


// recip.d: double-precision reciprocal approximation.
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}


// recip.s: single-precision reciprocal approximation.
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
2860
2861
// Conversions.
// FP <-> 32-bit word conversions/roundings. All are unary (ft field = f0);
// the result lands in fd as a 32-bit integer bit pattern.

// cvt.w.s: convert single to word using the current rounding mode.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


// cvt.w.d: convert double to word using the current rounding mode.
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


// trunc.w.s: convert single to word, rounding toward zero.
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


// trunc.w.d: convert double to word, rounding toward zero.
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


// round.w.s: convert single to word, rounding to nearest.
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


// round.w.d: convert double to word, rounding to nearest.
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


// floor.w.s: convert single to word, rounding toward minus infinity.
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


// floor.w.d: convert double to word, rounding toward minus infinity.
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


// ceil.w.s: convert single to word, rounding toward plus infinity.
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


// ceil.w.d: convert double to word, rounding toward plus infinity.
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
2911
2912
// rint.s: round single to a nearby FP integer value (r6 only, see rint()).
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }


// rint.d: round double to a nearby FP integer value (r6 only, see rint()).
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }


// rint.fmt: MIPS64r6-only round-to-integral in FP format. Unlike the older
// sel/seleqz helpers, fmt is not range-checked here beyond the r6 assert.
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
2923
2924
// FP <-> 64-bit long conversions/roundings. The cvt/trunc variants assert
// r2-or-r6 (64-bit FPU operations); the round/floor/ceil variants carry no
// variant assert here.

// cvt.l.s: convert single to long using the current rounding mode.
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


// cvt.l.d: convert double to long using the current rounding mode.
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


// trunc.l.s: convert single to long, rounding toward zero.
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


// trunc.l.d: convert double to long, rounding toward zero.
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


// round.l.s: convert single to long, rounding to nearest.
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


// round.l.d: convert double to long, rounding to nearest.
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


// floor.l.s: convert single to long, rounding toward minus infinity.
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


// floor.l.d: convert double to long, rounding toward minus infinity.
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


// ceil.l.s: convert single to long, rounding toward plus infinity.
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


// ceil.l.d: convert double to long, rounding toward plus infinity.
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
2977
2978
// class.s: MIPS64r6-only FP classify of a single-precision value.
void Assembler::class_s(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}


// class.d: MIPS64r6-only FP classify of a double-precision value.
void Assembler::class_d(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r6);
  GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}


// mina.fmt: MIPS64r6-only minimum-by-absolute-value. fmt must be S or D.
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}


// maxa.fmt: MIPS64r6-only maximum-by-absolute-value. fmt must be S or D.
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
    FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt == D) || (fmt == S));
  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
3005
3006
// Integer -> FP and FP <-> FP format conversions.

// cvt.s.w: convert 32-bit word in fs to single precision.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


// cvt.s.l: convert 64-bit long in fs to single precision (r2/r6 only).
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


// cvt.s.d: narrow double to single precision.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


// cvt.d.w: convert 32-bit word in fs to double precision.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


// cvt.d.l: convert 64-bit long in fs to double precision (r2/r6 only).
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


// cvt.d.s: widen single to double precision.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
3037
3038
// Conditions for >= MIPSr6.
// cmp.cond.fmt: r6 FP compare writing an all-ones/all-zeros mask into fd
// (replacing the pre-r6 condition-flag based c.cond.fmt below). fmt is
// pre-shifted into the rs field; the mask DCHECK verifies that.
void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
    FPURegister fd, FPURegister fs, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift |
      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
  emit(instr);
}


// cmp.cond.s. NOTE(review): r6 encodes the single-precision CMP with the
// W format field value and double with L -- this matches the r6 opcode map,
// but confirm against the MIPS64r6 manual.
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, W, fd, fs, ft);
}

// cmp.cond.d (see encoding note on cmp_s).
void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
                      FPURegister ft) {
  cmp(cond, L, fd, fs, ft);
}
3059
3060
// bc1eqz: r6 branch on FP register ft, with a signed 16-bit instruction
// offset packed into the immediate field. Pairs with the cmp() mask above.
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}


// bc1nez: complement of bc1eqz.
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
  DCHECK(kArchVariant == kMips64r6);
  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
  emit(instr);
}
3073
3074
// Conditions for < MIPSr6.
// c.cond.fmt: pre-r6 FP compare that sets condition flag cc (3 bits).
// The "cc << 8 | 3 << 4 | cond" term hand-assembles the flag number, the
// fixed function bits, and the condition into the low instruction bits.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  DCHECK(kArchVariant != kMips64r6);
  DCHECK(is_uint3(cc));
  DCHECK(fmt == S || fmt == D);
  DCHECK((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// c.cond.s: single-precision wrapper for c().
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, S, fs, ft, cc);
}


// c.cond.d: double-precision wrapper for c().
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
                    uint16_t cc) {
  c(cond, D, fs, ft, cc);
}
3098
3099
// Compare src1 against a literal 0.0 (the only supported src2, enforced by
// the DCHECK) by materializing zero through f14 and issuing c.cond.d on
// flag 0. CLOBBERS f14. Pre-r6 only, since it relies on c().
void Assembler::fcmp(FPURegister src1, const double src2,
                     FPUCondition cond) {
  DCHECK(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


// bc1f: pre-r6 branch if FP condition flag cc is false; cc sits at bit 18,
// the true/false bit at bit 16, and the 16-bit branch offset in the low bits.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// bc1t: pre-r6 branch if FP condition flag cc is true (bit 16 = 1).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  DCHECK(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
3121
3122
// Patches one internal reference at |pc| after the code buffer moved by
// |pc_delta| bytes. Handles four cases and returns the number of
// instructions patched (0 if the slot is the end-of-jump-chain sentinel):
//   - INTERNAL_REFERENCE: a raw 64-bit pointer stored in the instruction
//     stream (occupies 2 instruction slots).
//   - encoded lui/ori/ori triple: a 48-bit address spread over instructions
//     0, 1 and 3 of a 4-instruction li sequence (instruction 2 is untouched).
//   - an already-assembled j/jal: its 26-bit word offset is rebased.
//   - a "boxed" raw offset marked with kJRawMark/kJalRawMark: unboxed and
//     replaced by a real j/jal targeting the same 256 MB region.
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                         intptr_t pc_delta) {
  if (RelocInfo::IsInternalReference(rmode)) {
    // Raw 64-bit pointer embedded in the code stream.
    int64_t* p = reinterpret_cast<int64_t*>(pc);
    if (*p == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    *p += pc_delta;
    return 2;  // Number of instructions patched.
  }
  Instr instr = instr_at(pc);
  DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
  if (IsLui(instr)) {
    // 48-bit address in the lui/ori/ori instructions of a li sequence.
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    DCHECK(IsOri(instr_ori2));
    // TODO(plind): symbolic names for the shifts.
    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
    // Sign extend address (arithmetic shift brings the 48 bits back down
    // and replicates bit 47 into the upper bits).
    imm >>= 16;

    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    DCHECK((imm & 3) == 0);  // Targets are instruction-aligned.

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;
    instr_ori2 &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> 32) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm >> 16 & kImm16Mask));
    instr_at_put(pc + 3 * Assembler::kInstrSize,
                 instr_ori2 | (imm & kImm16Mask));
    return 4;  // Number of instructions patched.
  } else if (IsJ(instr) || IsJal(instr)) {
    // Regular j/jal relocation: rebase the 28-bit region offset.
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    instr &= ~kImm26Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  } else {
    DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
           ((instr & kJumpRawMask) == kJalRawMark));
    // Unbox raw offset and emit j/jal.
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    // Sign extend 28-bit offset to 32-bit.
    imm28 = (imm28 << 4) >> 4;
    uint64_t target =
        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
    target &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);
    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
    // Check markings whether to emit j or jal.
    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}
3193
3194
// Grows the code buffer when it is close to overflowing: doubles the size
// below 1 MB, then grows by 1 MB steps. Instructions are copied to the
// start of the new buffer and reloc info to its end, internal positions
// (pc_, reloc writer) are rebased, and embedded INTERNAL_REFERENCEs in the
// already-emitted code are re-patched for the new base address.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size =
      static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());

  // Copy the data. Instructions grow up from the buffer start; reloc info
  // grows down from the buffer end, hence the separate deltas.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
      (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries: absolute internal references embedded in the
  // code must follow the buffer to its new address.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(rmode, p, pc_delta);
    }
  }
  DCHECK(!overflow());
}
3241
3242
// Emit a raw byte of data into the instruction stream. The forbidden-slot
// check first makes sure data is not placed where an instruction is
// required (e.g. after a compact branch).
void Assembler::db(uint8_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


// Emit a raw 32-bit word of data into the instruction stream.
void Assembler::dd(uint32_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}


// Emit a raw 64-bit word of data into the instruction stream.
void Assembler::dq(uint64_t data) {
  CheckForEmitInForbiddenSlot();
  EmitHelper(data);
}
3259
3260
// Emit the 64-bit address of |label| as data, recorded as an internal
// reference so it is fixed up when the buffer moves. For a bound label the
// absolute buffer address is emitted immediately; for an unbound one a
// jump-chain entry is emitted and the label is tracked for later patching
// (and counted toward the trampoline-pool bookkeeping).
void Assembler::dd(Label* label) {
  uint64_t data;
  CheckForEmitInForbiddenSlot();
  if (label->is_bound()) {
    data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
  } else {
    data = jump_address(label);
    unbound_labels_count_++;
    internal_reference_positions_.insert(label->pos());
  }
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  EmitHelper(data);
}
3274
3275
// Records relocation information for the instruction at the current pc.
// External references are skipped unless serialization or debug code
// requires them; CODE_TARGET_WITH_ID additionally captures (and clears)
// the pending AST id.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::COMMENT &&
      rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
                                       RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3304
3305
// Prevents trampoline-pool emission for the next |instructions|
// instructions. The pool check runs first so a pending pool can still be
// flushed before the blocked region begins.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  CheckTrampolinePoolQuick(instructions);
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
3310
3311
// Emits the (single) trampoline pool: one j-to-label slot per currently
// unbound label, preceded by a branch that skips over the pool in normal
// control flow. Emission is deferred while blocked; once emitted,
// next_buffer_check_ is pushed to kMaxInt so the pool is never emitted twice.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // r6 has the compact bc; older variants use b with a delay slot nop.
      if (kArchVariant == kMips64r6) {
        bc(&after_pool);
      } else {
        b(&after_pool);
      }
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
          j(&after_pool);
        }
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
3370
3371
3372Address Assembler::target_address_at(Address pc) {
3373 Instr instr0 = instr_at(pc);
3374 Instr instr1 = instr_at(pc + 1 * kInstrSize);
3375 Instr instr3 = instr_at(pc + 3 * kInstrSize);
3376
3377 // Interpret 4 instructions for address generated by li: See listing in
3378 // Assembler::set_target_address_at() just below.
3379 if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
3380 (GetOpcodeField(instr3) == ORI)) {
3381 // Assemble the 48 bit value.
3382 int64_t addr = static_cast<int64_t>(
3383 ((uint64_t)(GetImmediate16(instr0)) << 32) |
3384 ((uint64_t)(GetImmediate16(instr1)) << 16) |
3385 ((uint64_t)(GetImmediate16(instr3))));
3386
3387 // Sign extend to get canonical address.
3388 addr = (addr << 16) >> 16;
3389 return reinterpret_cast<Address>(addr);
3390 }
3391 // We should never get here, force a bad address if we do.
3392 UNREACHABLE();
3393 return (Address)0x0;
3394}
3395
3396
3397// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
3398// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
3399// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
3400// OS::nan_value() returns a qNaN.
3401void Assembler::QuietNaN(HeapObject* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003402 HeapNumber::cast(object)->set_value(std::numeric_limits<double>::quiet_NaN());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003403}
3404
3405
// On Mips64, a target address is stored in a 4-instruction sequence:
//    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
//    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
//    2: dsll(rd, rd, 16);
//    3: ori(rd, rd, j.imm32_ & kImm16Mask);
//
// Patching the address must replace all the lui & ori instructions,
// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIPS64 because only 48 bits of the address are effectively used.
// It relies on the fact that the upper [63:48] bits are not used for virtual
// address translation and have to be set according to the value of bit 47 in
// order to get a canonical address.
  // The destination register is read out of the existing ori at pc+1; the
  // dsll at pc+2 is format-only and left untouched.
  Instr instr1 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRt(instr1);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint64_t itarget = reinterpret_cast<uint64_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instr instr0 = instr_at(pc);
  Instr instr3 = instr_at(pc + kInstrSize * 3);
  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
         GetOpcodeField(instr3) == ORI));
#endif

  // Must use 4 instructions to ensure patchable code.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  // dsll rt, rt, 16.
  // ori rt rt, lower-16.
  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
      | ((itarget >> 16) & kImm16Mask);
  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
      | (itarget & kImm16Mask);

  // Keep the i-cache coherent with the patched instructions unless the
  // caller explicitly asked to skip the flush.
  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
    Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
  }
}
3454
3455
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003456} // namespace internal
3457} // namespace v8
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003458
3459#endif // V8_TARGET_ARCH_MIPS64