// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
// snapshot.
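// As an illustrative example (the exact plumbing depends on the build files),
// a cross-compiling snapshot build would pass something like
// -DCAN_USE_FPU_INSTRUCTIONS=1 to the target compiler so the snapshot
// generator is allowed to emit FPU instructions.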
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots. This won't work for cross
  // compilation.
#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= 1u << FPU;
#endif

  return answer;
}


const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0",
    "f2",
    "f4",
    "f6",
    "f8",
    "f10",
    "f12",
    "f14",
    "f16",
    "f18",
    "f20",
    "f22",
    "f24",
    "f26"
  };
  return names[index];
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#ifndef __mips__
  // For the simulator build, use FPU.
  supported_ |= 1u << FPU;
#if defined(_MIPS_ARCH_MIPS32R6)
  // FP64 mode is implied on r6.
  supported_ |= 1u << FP64FPU;
#endif
#if defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#else
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.has_fpu()) supported_ |= 1u << FPU;
#if defined(FPU_MODE_FPXX)
  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
#elif defined(FPU_MODE_FP64)
  supported_ |= 1u << FP64FPU;
#endif
#if defined(_MIPS_ARCH_MIPS32RX)
  if (cpu.architecture() == 6) {
    supported_ |= 1u << MIPSr6;
  } else if (cpu.architecture() == 2) {
    supported_ |= 1u << MIPSr1;
    supported_ |= 1u << MIPSr2;
  } else {
    supported_ |= 1u << MIPSr1;
  }
#endif
#endif
}


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


int ToNumber(Register reg) {
  DCHECK(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
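  // MIPS does not use an out-of-line constant pool; 32-bit immediates are
  // materialized inline with lui/ori pairs, so there is never a constant
  // pool entry to point into.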
  return false;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
                       OffsetAddend offset_addend) : Operand(rm) {
  offset_ = unit * multiplier + offset_addend;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
    | (kRegister_sp_Code << kRtShift)
    | (kPointerSize & kImm16Mask);  // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
    | (kRegister_sp_Code << kRtShift)
    | (-kPointerSize & kImm16Mask);  // NOLINT
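// For reference: with sp being register 29 and kPointerSize == 4, the push
// instruction above encodes to 0x27bdfffc, i.e. addiu sp, sp, -4
// (opcode 001001, rs = rt = 29, imm16 = 0xfffc).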
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
    | (0 & kImm16Mask);  // NOLINT
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
    | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
    | (0 & kImm16Mask);  // NOLINT

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
    | (0 & kImm16Mask);  // NOLINT

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
    | (kNegOffset & kImm16Mask);  // NOLINT

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
    | (kNegOffset & kImm16Mask);  // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = FLAG_force_long_branches
      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value of -1 in the instruction, which is
// an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in code
// it is converted to an 18-bit value addressing bytes, hence the -4 value.
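// In other words, a branch whose 16-bit offset field holds -1 (0xffff) marks
// the end of a label's link chain; -1 words scaled to bytes gives the -4
// sentinel defined below.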

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      (opcode == COP1 && rs_field == BC1EQZ) ||
      (opcode == COP1 && rs_field == BC1NEZ);
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  if (!IsMipsArchVariant(kMips32r6)) {
    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
  } else {
    return GetOpcodeField(instr) == SPECIAL &&
        GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
  }
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL &&
      GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an ori (or immediate).
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  DCHECK(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.
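  // For example, a type-3 marker nop is emitted as sll(zero_reg, at, 3); it
  // still only writes the zero register, so it executes as a no-op.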

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}


int32_t Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  DCHECK(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  DCHECK(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  DCHECK(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  DCHECK(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      DCHECK(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int32_t delta = instr_address - imm28;
      DCHECK(pos > delta);
      return pos - delta;
    }
  }
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    DCHECK((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    DCHECK(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    DCHECK(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    DCHECK((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    DCHECK((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    DCHECK(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  DCHECK(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  DCHECK((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

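  // Branch offsets are relative to the instruction that follows the branch
  // (the delay slot), hence the extra kBranchPCOffset added to pc_offset().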
  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}


int32_t Assembler::branch_offset_compact(Label* L,
                                         bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(is_int16(offset >> 2));

  return offset;
}


int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21bit width.

  return offset;
}


int32_t Assembler::branch_offset21_compact(Label* L,
                                           bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - pc_offset();
  DCHECK((offset & 3) == 0);
  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21bit width.

  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      DCHECK((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      DCHECK(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, rt, rt, offset);
}


void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZ, rs, rt, offset);
}


void Assembler::bgec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BLEZL, rs, rt, offset);
}


void Assembler::bgezal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blezc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
}


void Assembler::bltzc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZL, rt, rt, offset);
}


void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZ, rs, rt, offset);
}


void Assembler::bltc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(!(rt.is(zero_reg)));
  DCHECK(rs.code() != rt.code());
  GenInstrImmediate(BGTZL, rs, rt, offset);
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bovc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  DCHECK(rs.code() >= rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::blezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
}


void Assembler::bgezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BLEZ, rt, rt, offset);
}


void Assembler::bgezall(Register rs, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
}


void Assembler::bltzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, rt, rt, offset);
}


void Assembler::bgtzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
}


void Assembler::beqzalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(ADDI, zero_reg, rt, offset);
}


void Assembler::bnezalc(Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rt.is(zero_reg)));
  GenInstrImmediate(DADDI, zero_reg, rt, offset);
}


void Assembler::beqc(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(ADDI, rs, rt, offset);
}


void Assembler::beqzc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::bnec(Register rs, Register rt, int16_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(rs.code() < rt.code());
  GenInstrImmediate(DADDI, rs, rt, offset);
}


void Assembler::bnezc(Register rs, int32_t offset) {
  DCHECK(IsMipsArchVariant(kMips32r6));
  DCHECK(!(rs.is(zero_reg)));
  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
  emit(instr);
}


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  if (!IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (rs.is(ra)) {
      positions_recorder()->WriteRecordedPositions();
    }
    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
    BlockTrampolinePoolFor(1);  // For associated delay slot.
  } else {
    jalr(rs, zero_reg);
  }
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
  DCHECK(in_range && ((target & 3) == 0));
#endif
Steve Block44f0eee2011-05-26 01:26:41 +01001427 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001428 GenInstrJump(JAL, target >> 2);
1429}
1430
1431
1432void Assembler::jalr(Register rs, Register rd) {
Steve Block44f0eee2011-05-26 01:26:41 +01001433 BlockTrampolinePoolScope block_trampoline_pool(this);
1434 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001435 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
Steve Block44f0eee2011-05-26 01:26:41 +01001436 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001437}
1438
1439
Ben Murdoch589d6972011-11-30 16:04:58 +00001440void Assembler::j_or_jr(int32_t target, Register rs) {
1441 // Get pc of delay slot.
1442 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001443 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
1444 (kImm26Bits + kImmFieldShift)) == 0;
Ben Murdoch589d6972011-11-30 16:04:58 +00001445 if (in_range) {
1446 j(target);
1447 } else {
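    // Out of range for a direct j: the caller is expected to have loaded the target into t9.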
1448 jr(t9);
1449 }
1450}
1451
1452
1453void Assembler::jal_or_jalr(int32_t target, Register rs) {
1454 // Get pc of delay slot.
1455 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001456 bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
 1457 (kImm26Bits + kImmFieldShift)) == 0;
Ben Murdoch589d6972011-11-30 16:04:58 +00001458 if (in_range) {
1459 jal(target);
1460 } else {
1461 jalr(t9);
1462 }
1463}
1464
1465
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001466// -------Data-processing-instructions---------
Andrei Popescu31002712010-02-23 13:46:05 +00001467
1468// Arithmetic.
1469
Andrei Popescu31002712010-02-23 13:46:05 +00001470void Assembler::addu(Register rd, Register rs, Register rt) {
1471 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1472}
1473
1474
Andrei Popescu31002712010-02-23 13:46:05 +00001475void Assembler::addiu(Register rd, Register rs, int32_t j) {
1476 GenInstrImmediate(ADDIU, rs, rd, j);
Andrei Popescu31002712010-02-23 13:46:05 +00001477}
1478
1479
1480void Assembler::subu(Register rd, Register rs, Register rt) {
1481 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1482}
1483
1484
1485void Assembler::mul(Register rd, Register rs, Register rt) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001486 if (!IsMipsArchVariant(kMips32r6)) {
1487 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1488 } else {
1489 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
1490 }
1491}
1492
1493
1494void Assembler::mulu(Register rd, Register rs, Register rt) {
1495 DCHECK(IsMipsArchVariant(kMips32r6));
1496 GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
1497}
1498
1499
1500void Assembler::muh(Register rd, Register rs, Register rt) {
1501 DCHECK(IsMipsArchVariant(kMips32r6));
1502 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
1503}
1504
1505
1506void Assembler::muhu(Register rd, Register rs, Register rt) {
1507 DCHECK(IsMipsArchVariant(kMips32r6));
1508 GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
1509}
1510
1511
1512void Assembler::mod(Register rd, Register rs, Register rt) {
1513 DCHECK(IsMipsArchVariant(kMips32r6));
1514 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
1515}
1516
1517
1518void Assembler::modu(Register rd, Register rs, Register rt) {
1519 DCHECK(IsMipsArchVariant(kMips32r6));
1520 GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
Andrei Popescu31002712010-02-23 13:46:05 +00001521}
1522
1523
1524void Assembler::mult(Register rs, Register rt) {
1525 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1526}
1527
1528
1529void Assembler::multu(Register rs, Register rt) {
1530 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1531}
1532
1533
1534void Assembler::div(Register rs, Register rt) {
1535 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1536}
1537
1538
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001539void Assembler::div(Register rd, Register rs, Register rt) {
1540 DCHECK(IsMipsArchVariant(kMips32r6));
1541 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
1542}
1543
1544
Andrei Popescu31002712010-02-23 13:46:05 +00001545void Assembler::divu(Register rs, Register rt) {
1546 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1547}
1548
1549
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001550void Assembler::divu(Register rd, Register rs, Register rt) {
1551 DCHECK(IsMipsArchVariant(kMips32r6));
1552 GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
1553}
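// Example (r6, illustrative): quotient and remainder of a0 / a1 without using HI/LO:
//   div(v0, a0, a1);  // v0 = a0 / a1
//   mod(v1, a0, a1);  // v1 = a0 % a1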
1554
1555
Andrei Popescu31002712010-02-23 13:46:05 +00001556// Logical.
1557
1558void Assembler::and_(Register rd, Register rs, Register rt) {
1559 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1560}
1561
1562
1563void Assembler::andi(Register rt, Register rs, int32_t j) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001564 DCHECK(is_uint16(j));
Andrei Popescu31002712010-02-23 13:46:05 +00001565 GenInstrImmediate(ANDI, rs, rt, j);
1566}
1567
1568
1569void Assembler::or_(Register rd, Register rs, Register rt) {
1570 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1571}
1572
1573
1574void Assembler::ori(Register rt, Register rs, int32_t j) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001575 DCHECK(is_uint16(j));
Andrei Popescu31002712010-02-23 13:46:05 +00001576 GenInstrImmediate(ORI, rs, rt, j);
1577}
1578
1579
1580void Assembler::xor_(Register rd, Register rs, Register rt) {
1581 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1582}
1583
1584
1585void Assembler::xori(Register rt, Register rs, int32_t j) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001586 DCHECK(is_uint16(j));
Andrei Popescu31002712010-02-23 13:46:05 +00001587 GenInstrImmediate(XORI, rs, rt, j);
1588}
1589
1590
1591void Assembler::nor(Register rd, Register rs, Register rt) {
1592 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1593}
1594
1595
1596// Shifts.
Steve Block44f0eee2011-05-26 01:26:41 +01001597void Assembler::sll(Register rd,
1598 Register rt,
1599 uint16_t sa,
1600 bool coming_from_nop) {
1601 // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1602 // generated using the sll instruction. They must be generated using
1603 // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1604 // instructions.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001605 DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
Andrei Popescu31002712010-02-23 13:46:05 +00001606 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
1607}
1608
1609
1610void Assembler::sllv(Register rd, Register rt, Register rs) {
1611 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1612}
1613
1614
1615void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1616 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
1617}
1618
1619
1620void Assembler::srlv(Register rd, Register rt, Register rs) {
1621 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1622}
1623
1624
1625void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1626 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
1627}
1628
1629
1630void Assembler::srav(Register rd, Register rt, Register rs) {
1631 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1632}
1633
1634
Steve Block44f0eee2011-05-26 01:26:41 +01001635void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1636 // Should be called via MacroAssembler::Ror.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001637 DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1638 DCHECK(IsMipsArchVariant(kMips32r2));
Steve Block44f0eee2011-05-26 01:26:41 +01001639 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1640 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1641 emit(instr);
1642}
1643
1644
1645void Assembler::rotrv(Register rd, Register rt, Register rs) {
1646 // Should be called via MacroAssembler::Ror.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001647 DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1648 DCHECK(IsMipsArchVariant(kMips32r2));
Steve Block44f0eee2011-05-26 01:26:41 +01001649 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1650 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1651 emit(instr);
1652}
1653
1654
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001655// ------------Memory-instructions-------------
Andrei Popescu31002712010-02-23 13:46:05 +00001656
Steve Block44f0eee2011-05-26 01:26:41 +01001657// Helper for base-reg + offset, when offset is larger than int16.
1658void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001659 DCHECK(!src.rm().is(at));
1660 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
Steve Block44f0eee2011-05-26 01:26:41 +01001661 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1662 addu(at, at, src.rm()); // Add base register.
1663}
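// Illustrative expansion used by the load/store emitters below:
//   lw(v0, MemOperand(s0, 0x12345678)) becomes
//     lui  at, 0x1234
//     ori  at, at, 0x5678
//     addu at, at, s0
//     lw   v0, 0(at)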
1664
1665
Andrei Popescu31002712010-02-23 13:46:05 +00001666void Assembler::lb(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001667 if (is_int16(rs.offset_)) {
1668 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1669 } else { // Offset > 16 bits, use multiple instructions to load.
1670 LoadRegPlusOffsetToAt(rs);
1671 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1672 }
Andrei Popescu31002712010-02-23 13:46:05 +00001673}
1674
1675
1676void Assembler::lbu(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001677 if (is_int16(rs.offset_)) {
1678 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1679 } else { // Offset > 16 bits, use multiple instructions to load.
1680 LoadRegPlusOffsetToAt(rs);
1681 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1682 }
1683}
1684
1685
1686void Assembler::lh(Register rd, const MemOperand& rs) {
1687 if (is_int16(rs.offset_)) {
1688 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1689 } else { // Offset > 16 bits, use multiple instructions to load.
1690 LoadRegPlusOffsetToAt(rs);
1691 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1692 }
1693}
1694
1695
1696void Assembler::lhu(Register rd, const MemOperand& rs) {
1697 if (is_int16(rs.offset_)) {
1698 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1699 } else { // Offset > 16 bits, use multiple instructions to load.
1700 LoadRegPlusOffsetToAt(rs);
1701 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1702 }
Andrei Popescu31002712010-02-23 13:46:05 +00001703}
1704
1705
1706void Assembler::lw(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001707 if (is_int16(rs.offset_)) {
1708 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1709 } else { // Offset > 16 bits, use multiple instructions to load.
1710 LoadRegPlusOffsetToAt(rs);
1711 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1712 }
Steve Block44f0eee2011-05-26 01:26:41 +01001713}
1714
1715
1716void Assembler::lwl(Register rd, const MemOperand& rs) {
1717 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1718}
1719
1720
1721void Assembler::lwr(Register rd, const MemOperand& rs) {
1722 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
Andrei Popescu31002712010-02-23 13:46:05 +00001723}
1724
1725
1726void Assembler::sb(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001727 if (is_int16(rs.offset_)) {
1728 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1729 } else { // Offset > 16 bits, use multiple instructions to store.
1730 LoadRegPlusOffsetToAt(rs);
1731 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1732 }
1733}
1734
1735
1736void Assembler::sh(Register rd, const MemOperand& rs) {
1737 if (is_int16(rs.offset_)) {
1738 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1739 } else { // Offset > 16 bits, use multiple instructions to store.
1740 LoadRegPlusOffsetToAt(rs);
1741 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1742 }
Andrei Popescu31002712010-02-23 13:46:05 +00001743}
1744
1745
1746void Assembler::sw(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001747 if (is_int16(rs.offset_)) {
1748 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1749 } else { // Offset > 16 bits, use multiple instructions to store.
1750 LoadRegPlusOffsetToAt(rs);
1751 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1752 }
Steve Block44f0eee2011-05-26 01:26:41 +01001753}
1754
1755
1756void Assembler::swl(Register rd, const MemOperand& rs) {
1757 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1758}
1759
1760
1761void Assembler::swr(Register rd, const MemOperand& rs) {
1762 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
Andrei Popescu31002712010-02-23 13:46:05 +00001763}
1764
1765
1766void Assembler::lui(Register rd, int32_t j) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001767 DCHECK(is_uint16(j));
Andrei Popescu31002712010-02-23 13:46:05 +00001768 GenInstrImmediate(LUI, zero_reg, rd, j);
1769}
1770
1771
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001772void Assembler::aui(Register rs, Register rt, int32_t j) {
1773 // This instruction uses the same opcode as 'lui'. The difference in encoding is
 1774 // that 'lui' has the zero register in the rs field.
1775 DCHECK(is_uint16(j));
1776 GenInstrImmediate(LUI, rs, rt, j);
1777}
1778
1779
1780// -------------Misc-instructions--------------
Andrei Popescu31002712010-02-23 13:46:05 +00001781
1782// Break / Trap instructions.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001783void Assembler::break_(uint32_t code, bool break_as_stop) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001784 DCHECK((code & ~0xfffff) == 0);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001785 // We need to invalidate breaks that could be stops as well because the
1786 // simulator expects a char pointer after the stop instruction.
1787 // See constants-mips.h for explanation.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001788 DCHECK((break_as_stop &&
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001789 code <= kMaxStopCode &&
1790 code > kMaxWatchpointCode) ||
1791 (!break_as_stop &&
1792 (code > kMaxStopCode ||
1793 code <= kMaxWatchpointCode)));
Andrei Popescu31002712010-02-23 13:46:05 +00001794 Instr break_instr = SPECIAL | BREAK | (code << 6);
1795 emit(break_instr);
1796}
1797
1798
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001799void Assembler::stop(const char* msg, uint32_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001800 DCHECK(code > kMaxWatchpointCode);
1801 DCHECK(code <= kMaxStopCode);
1802#if V8_HOST_ARCH_MIPS
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001803 break_(0x54321);
1804#else // V8_HOST_ARCH_MIPS
1805 BlockTrampolinePoolFor(2);
1806 // The Simulator will handle the stop instruction and get the message address.
1807 // On MIPS stop() is just a special kind of break_().
1808 break_(code, true);
1809 emit(reinterpret_cast<Instr>(msg));
1810#endif
1811}
1812
1813
Andrei Popescu31002712010-02-23 13:46:05 +00001814void Assembler::tge(Register rs, Register rt, uint16_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001815 DCHECK(is_uint10(code));
Andrei Popescu31002712010-02-23 13:46:05 +00001816 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1817 | rt.code() << kRtShift | code << 6;
1818 emit(instr);
1819}
1820
1821
1822void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001823 DCHECK(is_uint10(code));
Andrei Popescu31002712010-02-23 13:46:05 +00001824 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1825 | rt.code() << kRtShift | code << 6;
1826 emit(instr);
1827}
1828
1829
1830void Assembler::tlt(Register rs, Register rt, uint16_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001831 DCHECK(is_uint10(code));
Andrei Popescu31002712010-02-23 13:46:05 +00001832 Instr instr =
1833 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1834 emit(instr);
1835}
1836
1837
1838void Assembler::tltu(Register rs, Register rt, uint16_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001839 DCHECK(is_uint10(code));
Steve Block44f0eee2011-05-26 01:26:41 +01001840 Instr instr =
1841 SPECIAL | TLTU | rs.code() << kRsShift
Andrei Popescu31002712010-02-23 13:46:05 +00001842 | rt.code() << kRtShift | code << 6;
1843 emit(instr);
1844}
1845
1846
1847void Assembler::teq(Register rs, Register rt, uint16_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001848 DCHECK(is_uint10(code));
Andrei Popescu31002712010-02-23 13:46:05 +00001849 Instr instr =
1850 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1851 emit(instr);
1852}
1853
1854
1855void Assembler::tne(Register rs, Register rt, uint16_t code) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001856 DCHECK(is_uint10(code));
Andrei Popescu31002712010-02-23 13:46:05 +00001857 Instr instr =
1858 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1859 emit(instr);
1860}
1861
1862
1863// Move from HI/LO register.
1864
1865void Assembler::mfhi(Register rd) {
1866 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1867}
1868
1869
1870void Assembler::mflo(Register rd) {
1871 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1872}
1873
1874
1875// Set on less than instructions.
1876void Assembler::slt(Register rd, Register rs, Register rt) {
1877 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1878}
1879
1880
1881void Assembler::sltu(Register rd, Register rs, Register rt) {
1882 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1883}
1884
1885
1886void Assembler::slti(Register rt, Register rs, int32_t j) {
1887 GenInstrImmediate(SLTI, rs, rt, j);
1888}
1889
1890
1891void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1892 GenInstrImmediate(SLTIU, rs, rt, j);
1893}
1894
1895
Steve Block44f0eee2011-05-26 01:26:41 +01001896// Conditional move.
1897void Assembler::movz(Register rd, Register rs, Register rt) {
1898 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1899}
1900
1901
1902void Assembler::movn(Register rd, Register rs, Register rt) {
1903 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1904}
1905
1906
1907void Assembler::movt(Register rd, Register rs, uint16_t cc) {
1908 Register rt;
Ben Murdoch257744e2011-11-30 15:57:28 +00001909 rt.code_ = (cc & 0x0007) << 2 | 1;
Steve Block44f0eee2011-05-26 01:26:41 +01001910 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1911}
1912
1913
1914void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1915 Register rt;
Ben Murdoch257744e2011-11-30 15:57:28 +00001916 rt.code_ = (cc & 0x0007) << 2 | 0;
Steve Block44f0eee2011-05-26 01:26:41 +01001917 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1918}
1919
1920
1921// Bit twiddling.
1922void Assembler::clz(Register rd, Register rs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001923 if (!IsMipsArchVariant(kMips32r6)) {
1924 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1925 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1926 } else {
1927 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
1928 }
Steve Block44f0eee2011-05-26 01:26:41 +01001929}
1930
1931
1932void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1933 // Should be called via MacroAssembler::Ins.
1934 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001935 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
Steve Block44f0eee2011-05-26 01:26:41 +01001936 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1937}
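// Example (illustrative): ins_(v0, a0, 8, 4) replaces bits [11:8] of v0 with the low 4 bits of a0.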
1938
1939
1940void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1941 // Should be called via MacroAssembler::Ext.
1942 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001943 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
Steve Block44f0eee2011-05-26 01:26:41 +01001944 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1945}
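// Example (illustrative): ext_(v0, a0, 8, 4) sets v0 to bits [11:8] of a0, zero-extended.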
1946
1947
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001948void Assembler::pref(int32_t hint, const MemOperand& rs) {
1949 DCHECK(!IsMipsArchVariant(kLoongson));
1950 DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
1951 Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
1952 | (rs.offset_);
1953 emit(instr);
1954}
1955
1956
1957// --------Coprocessor-instructions----------------
Andrei Popescu31002712010-02-23 13:46:05 +00001958
1959// Load, store, move.
1960void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1961 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1962}
1963
1964
1965void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
Steve Block44f0eee2011-05-26 01:26:41 +01001966 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1967 // load to two 32-bit loads.
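  // Note: Register::kMantissaOffset/kExponentOffset account for target endianness.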
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001968 if (IsFp64Mode()) {
1969 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1970 Register::kMantissaOffset);
1971 GenInstrImmediate(LW, src.rm(), at, src.offset_ +
1972 Register::kExponentOffset);
1973 mthc1(at, fd);
1974 } else {
1975 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
1976 Register::kMantissaOffset);
1977 FPURegister nextfpreg;
1978 nextfpreg.setcode(fd.code() + 1);
1979 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
1980 Register::kExponentOffset);
1981 }
Andrei Popescu31002712010-02-23 13:46:05 +00001982}
1983
1984
1985void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1986 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1987}
1988
1989
1990void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
Steve Block44f0eee2011-05-26 01:26:41 +01001991 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1992 // store to two 32-bit stores.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001993 if (IsFp64Mode()) {
1994 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
1995 Register::kMantissaOffset);
1996 mfhc1(at, fd);
1997 GenInstrImmediate(SW, src.rm(), at, src.offset_ +
1998 Register::kExponentOffset);
1999 } else {
2000 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
2001 Register::kMantissaOffset);
2002 FPURegister nextfpreg;
2003 nextfpreg.setcode(fd.code() + 1);
2004 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
2005 Register::kExponentOffset);
2006 }
Andrei Popescu31002712010-02-23 13:46:05 +00002007}
2008
2009
Steve Block44f0eee2011-05-26 01:26:41 +01002010void Assembler::mtc1(Register rt, FPURegister fs) {
Andrei Popescu31002712010-02-23 13:46:05 +00002011 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2012}
2013
2014
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002015void Assembler::mthc1(Register rt, FPURegister fs) {
2016 GenInstrRegister(COP1, MTHC1, rt, fs, f0);
2017}
2018
2019
Steve Block44f0eee2011-05-26 01:26:41 +01002020void Assembler::mfc1(Register rt, FPURegister fs) {
Andrei Popescu31002712010-02-23 13:46:05 +00002021 GenInstrRegister(COP1, MFC1, rt, fs, f0);
2022}
2023
2024
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002025void Assembler::mfhc1(Register rt, FPURegister fs) {
2026 GenInstrRegister(COP1, MFHC1, rt, fs, f0);
2027}
2028
2029
Steve Block44f0eee2011-05-26 01:26:41 +01002030void Assembler::ctc1(Register rt, FPUControlRegister fs) {
2031 GenInstrRegister(COP1, CTC1, rt, fs);
2032}
2033
2034
2035void Assembler::cfc1(Register rt, FPUControlRegister fs) {
2036 GenInstrRegister(COP1, CFC1, rt, fs);
2037}
2038
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002039
Ben Murdoch589d6972011-11-30 16:04:58 +00002040void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2041 uint64_t i;
2042 memcpy(&i, &d, 8);
2043
2044 *lo = i & 0xffffffff;
2045 *hi = i >> 32;
2046}
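// Example: DoubleAsTwoUInt32(1.0, &lo, &hi) yields lo = 0x00000000, hi = 0x3ff00000.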
Steve Block44f0eee2011-05-26 01:26:41 +01002047
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002048
Steve Block44f0eee2011-05-26 01:26:41 +01002049// Arithmetic.
2050
2051void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2052 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
2053}
2054
2055
2056void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2057 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
2058}
2059
2060
2061void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2062 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
2063}
2064
2065
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002066void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2067 FPURegister ft) {
2068 GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
2069}
2070
2071
Steve Block44f0eee2011-05-26 01:26:41 +01002072void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
2073 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
2074}
2075
2076
2077void Assembler::abs_d(FPURegister fd, FPURegister fs) {
2078 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
2079}
2080
2081
2082void Assembler::mov_d(FPURegister fd, FPURegister fs) {
2083 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
2084}
2085
2086
2087void Assembler::neg_d(FPURegister fd, FPURegister fs) {
2088 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
2089}
2090
2091
2092void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
2093 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
Andrei Popescu31002712010-02-23 13:46:05 +00002094}
2095
2096
2097// Conversions.
2098
2099void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
2100 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
2101}
2102
2103
2104void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
2105 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
2106}
2107
2108
Steve Block44f0eee2011-05-26 01:26:41 +01002109void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
2110 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
2111}
2112
2113
2114void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
2115 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
2116}
2117
2118
2119void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
2120 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
2121}
2122
2123
2124void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
2125 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
2126}
2127
2128
2129void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
2130 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
2131}
2132
2133
2134void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
2135 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
2136}
2137
2138
2139void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
2140 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
2141}
2142
2143
2144void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
2145 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
2146}
2147
2148
Andrei Popescu31002712010-02-23 13:46:05 +00002149void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002150 DCHECK(IsMipsArchVariant(kMips32r2));
Andrei Popescu31002712010-02-23 13:46:05 +00002151 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
2152}
2153
2154
2155void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002156 DCHECK(IsMipsArchVariant(kMips32r2));
Andrei Popescu31002712010-02-23 13:46:05 +00002157 GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
2158}
2159
2160
Steve Block44f0eee2011-05-26 01:26:41 +01002161void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002162 DCHECK(IsMipsArchVariant(kMips32r2));
Steve Block44f0eee2011-05-26 01:26:41 +01002163 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
2164}
2165
2166
2167void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002168 DCHECK(IsMipsArchVariant(kMips32r2));
Steve Block44f0eee2011-05-26 01:26:41 +01002169 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
2170}
2171
2172
2173void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
2174 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
2175}
2176
2177
2178void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
2179 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
2180}
2181
2182
2183void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
2184 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
2185}
2186
2187
2188void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
2189 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
2190}
2191
2192
2193void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
2194 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
2195}
2196
2197
2198void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
2199 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
2200}
2201
2202
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002203void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
2204 FPURegister fs) {
2205 DCHECK(IsMipsArchVariant(kMips32r6));
2206 DCHECK((fmt == D) || (fmt == S));
2207 GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
2208}
2209
2210
2211void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
2212 FPURegister fs) {
2213 DCHECK(IsMipsArchVariant(kMips32r6));
2214 DCHECK((fmt == D) || (fmt == S));
2215 GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
2216}
2217
2218
2219void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
2220 FPURegister fs) {
2221 DCHECK(IsMipsArchVariant(kMips32r6));
2222 DCHECK((fmt == D) || (fmt == S));
2223 GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
2224}
2225
2226
2227void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
2228 FPURegister fs) {
2229 DCHECK(IsMipsArchVariant(kMips32r6));
2230 DCHECK((fmt == D) || (fmt == S));
2231 GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
2232}
2233
2234
Andrei Popescu31002712010-02-23 13:46:05 +00002235void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
2236 GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
2237}
2238
2239
2240void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002241 DCHECK(IsMipsArchVariant(kMips32r2));
Andrei Popescu31002712010-02-23 13:46:05 +00002242 GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
2243}
2244
2245
2246void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
2247 GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
2248}
2249
2250
2251void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
2252 GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
2253}
2254
2255
2256void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257 DCHECK(IsMipsArchVariant(kMips32r2));
Andrei Popescu31002712010-02-23 13:46:05 +00002258 GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
2259}
2260
2261
2262void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
2263 GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
2264}
2265
2266
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002267// Conditions for >= MIPSr6.
2268void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
2269 FPURegister fd, FPURegister fs, FPURegister ft) {
2270 DCHECK(IsMipsArchVariant(kMips32r6));
2271 DCHECK((fmt & ~(31 << kRsShift)) == 0);
2272 Instr instr = COP1 | fmt | ft.code() << kFtShift |
2273 fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
2274 emit(instr);
2275}
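// On r6, cmp.cond.fmt writes an all-ones/all-zeroes mask into fd; bc1eqz/bc1nez
// below branch on bit 0 of that FPR instead of the pre-r6 condition codes.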
2276
2277
2278void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
2279 DCHECK(IsMipsArchVariant(kMips32r6));
2280 Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
2281 emit(instr);
2282}
2283
2284
2285void Assembler::bc1nez(int16_t offset, FPURegister ft) {
2286 DCHECK(IsMipsArchVariant(kMips32r6));
2287 Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
2288 emit(instr);
2289}
2290
2291
2292// Conditions for < MIPSr6.
Andrei Popescu31002712010-02-23 13:46:05 +00002293void Assembler::c(FPUCondition cond, SecondaryField fmt,
Steve Block44f0eee2011-05-26 01:26:41 +01002294 FPURegister fs, FPURegister ft, uint16_t cc) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002295 DCHECK(is_uint3(cc));
2296 DCHECK((fmt & ~(31 << kRsShift)) == 0);
Andrei Popescu31002712010-02-23 13:46:05 +00002297 Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
2298 | cc << 8 | 3 << 4 | cond;
2299 emit(instr);
2300}
2301
2302
Steve Block44f0eee2011-05-26 01:26:41 +01002303void Assembler::fcmp(FPURegister src1, const double src2,
2304 FPUCondition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002305 DCHECK(src2 == 0.0);
Steve Block44f0eee2011-05-26 01:26:41 +01002306 mtc1(zero_reg, f14);
2307 cvt_d_w(f14, f14);
2308 c(cond, D, src1, f14, 0);
2309}
2310
2311
Andrei Popescu31002712010-02-23 13:46:05 +00002312void Assembler::bc1f(int16_t offset, uint16_t cc) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002313 DCHECK(is_uint3(cc));
Andrei Popescu31002712010-02-23 13:46:05 +00002314 Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
2315 emit(instr);
2316}
2317
2318
2319void Assembler::bc1t(int16_t offset, uint16_t cc) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002320 DCHECK(is_uint3(cc));
Andrei Popescu31002712010-02-23 13:46:05 +00002321 Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
2322 emit(instr);
2323}
2324
2325
2326// Debugging.
2327void Assembler::RecordJSReturn() {
Steve Block44f0eee2011-05-26 01:26:41 +01002328 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00002329 CheckBuffer();
2330 RecordRelocInfo(RelocInfo::JS_RETURN);
2331}
2332
2333
Steve Block44f0eee2011-05-26 01:26:41 +01002334void Assembler::RecordDebugBreakSlot() {
2335 positions_recorder()->WriteRecordedPositions();
2336 CheckBuffer();
2337 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2338}
2339
2340
Andrei Popescu31002712010-02-23 13:46:05 +00002341void Assembler::RecordComment(const char* msg) {
Steve Block44f0eee2011-05-26 01:26:41 +01002342 if (FLAG_code_comments) {
Andrei Popescu31002712010-02-23 13:46:05 +00002343 CheckBuffer();
2344 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2345 }
2346}
2347
2348
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002349int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
2350 Instr instr = instr_at(pc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002351 DCHECK(IsJ(instr) || IsLui(instr));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002352 if (IsLui(instr)) {
2353 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
2354 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002355 DCHECK(IsOri(instr_ori));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002356 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
2357 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
2358 if (imm == kEndOfJumpChain) {
2359 return 0; // Number of instructions patched.
2360 }
2361 imm += pc_delta;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002362 DCHECK((imm & 3) == 0);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002363
2364 instr_lui &= ~kImm16Mask;
2365 instr_ori &= ~kImm16Mask;
2366
2367 instr_at_put(pc + 0 * Assembler::kInstrSize,
2368 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
2369 instr_at_put(pc + 1 * Assembler::kInstrSize,
2370 instr_ori | (imm & kImm16Mask));
2371 return 2; // Number of instructions patched.
2372 } else {
2373 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002374 if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002375 return 0; // Number of instructions patched.
2376 }
2377 imm28 += pc_delta;
2378 imm28 &= kImm28Mask;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002379 DCHECK((imm28 & 3) == 0);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002380
2381 instr &= ~kImm26Mask;
2382 uint32_t imm26 = imm28 >> 2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002383 DCHECK(is_uint26(imm26));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002384
2385 instr_at_put(pc, instr | (imm26 & kImm26Mask));
2386 return 1; // Number of instructions patched.
2387 }
2388}
2389
2390
Andrei Popescu31002712010-02-23 13:46:05 +00002391void Assembler::GrowBuffer() {
2392 if (!own_buffer_) FATAL("external code buffer is too small");
2393
2394 // Compute new buffer size.
Steve Block44f0eee2011-05-26 01:26:41 +01002395 CodeDesc desc; // The new buffer.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002396 if (buffer_size_ < 1 * MB) {
Andrei Popescu31002712010-02-23 13:46:05 +00002397 desc.buffer_size = 2*buffer_size_;
2398 } else {
2399 desc.buffer_size = buffer_size_ + 1*MB;
2400 }
Steve Block44f0eee2011-05-26 01:26:41 +01002401 CHECK_GT(desc.buffer_size, 0); // No overflow.
Andrei Popescu31002712010-02-23 13:46:05 +00002402
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002403 // Set up new buffer.
Andrei Popescu31002712010-02-23 13:46:05 +00002404 desc.buffer = NewArray<byte>(desc.buffer_size);
2405
2406 desc.instr_size = pc_offset();
2407 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2408
2409 // Copy the data.
2410 int pc_delta = desc.buffer - buffer_;
2411 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002412 MemMove(desc.buffer, buffer_, desc.instr_size);
2413 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
2414 desc.reloc_size);
Andrei Popescu31002712010-02-23 13:46:05 +00002415
2416 // Switch buffers.
2417 DeleteArray(buffer_);
2418 buffer_ = desc.buffer;
2419 buffer_size_ = desc.buffer_size;
2420 pc_ += pc_delta;
2421 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2422 reloc_info_writer.last_pc() + pc_delta);
2423
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002424 // Relocate runtime entries.
2425 for (RelocIterator it(desc); !it.done(); it.next()) {
2426 RelocInfo::Mode rmode = it.rinfo()->rmode();
2427 if (rmode == RelocInfo::INTERNAL_REFERENCE) {
2428 byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
2429 RelocateInternalReference(p, pc_delta);
2430 }
2431 }
Andrei Popescu31002712010-02-23 13:46:05 +00002432
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002433 DCHECK(!overflow());
Andrei Popescu31002712010-02-23 13:46:05 +00002434}
2435
2436
Steve Block44f0eee2011-05-26 01:26:41 +01002437void Assembler::db(uint8_t data) {
2438 CheckBuffer();
2439 *reinterpret_cast<uint8_t*>(pc_) = data;
2440 pc_ += sizeof(uint8_t);
2441}
2442
2443
2444void Assembler::dd(uint32_t data) {
2445 CheckBuffer();
2446 *reinterpret_cast<uint32_t*>(pc_) = data;
2447 pc_ += sizeof(uint32_t);
2448}
2449
2450
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002451void Assembler::emit_code_stub_address(Code* stub) {
2452 CheckBuffer();
2453 *reinterpret_cast<uint32_t*>(pc_) =
2454 reinterpret_cast<uint32_t>(stub->instruction_start());
2455 pc_ += sizeof(uint32_t);
2456}
2457
2458
Andrei Popescu31002712010-02-23 13:46:05 +00002459void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002460 // We do not try to reuse pool constants.
2461 RelocInfo rinfo(pc_, rmode, data, NULL);
Steve Block44f0eee2011-05-26 01:26:41 +01002462 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
Andrei Popescu31002712010-02-23 13:46:05 +00002463 // Adjust code for new modes.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002464 DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
Steve Block44f0eee2011-05-26 01:26:41 +01002465 || RelocInfo::IsJSReturn(rmode)
Andrei Popescu31002712010-02-23 13:46:05 +00002466 || RelocInfo::IsComment(rmode)
2467 || RelocInfo::IsPosition(rmode));
2468 // These modes do not need an entry in the constant pool.
2469 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002470 if (!RelocInfo::IsNone(rinfo.rmode())) {
Andrei Popescu31002712010-02-23 13:46:05 +00002471 // Don't record external references unless the heap will be serialized.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002472 if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
2473 !serializer_enabled() && !emit_debug_code()) {
2474 return;
Andrei Popescu31002712010-02-23 13:46:05 +00002475 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002476 DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
Ben Murdoch257744e2011-11-30 15:57:28 +00002477 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002478 RelocInfo reloc_info_with_ast_id(pc_,
2479 rmode,
2480 RecordedAstId().ToInt(),
2481 NULL);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002482 ClearRecordedAstId();
Ben Murdoch257744e2011-11-30 15:57:28 +00002483 reloc_info_writer.Write(&reloc_info_with_ast_id);
2484 } else {
2485 reloc_info_writer.Write(&rinfo);
2486 }
Andrei Popescu31002712010-02-23 13:46:05 +00002487 }
2488}
2489
2490
Steve Block44f0eee2011-05-26 01:26:41 +01002491void Assembler::BlockTrampolinePoolFor(int instructions) {
2492 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2493}
2494
2495
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002496void Assembler::CheckTrampolinePool() {
Steve Block44f0eee2011-05-26 01:26:41 +01002497 // Some small sequences of instructions must not be broken up by the
2498 // insertion of a trampoline pool; such sequences are protected by setting
2499 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2500 // which are both checked here. Also, recursive calls to CheckTrampolinePool
2501 // are blocked by trampoline_pool_blocked_nesting_.
2502 if ((trampoline_pool_blocked_nesting_ > 0) ||
2503 (pc_offset() < no_trampoline_pool_before_)) {
2504 // Emission is currently blocked; make sure we try again as soon as
2505 // possible.
2506 if (trampoline_pool_blocked_nesting_ > 0) {
2507 next_buffer_check_ = pc_offset() + kInstrSize;
2508 } else {
2509 next_buffer_check_ = no_trampoline_pool_before_;
2510 }
2511 return;
2512 }
2513
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002514 DCHECK(!trampoline_emitted_);
2515 DCHECK(unbound_labels_count_ >= 0);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002516 if (unbound_labels_count_ > 0) {
2517 // First we emit a jump (2 instructions), then we emit the trampoline pool.
2518 { BlockTrampolinePoolScope block_trampoline_pool(this);
2519 Label after_pool;
Steve Block44f0eee2011-05-26 01:26:41 +01002520 b(&after_pool);
2521 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01002522
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002523 int pool_start = pc_offset();
2524 for (int i = 0; i < unbound_labels_count_; i++) {
2525 uint32_t imm32;
2526 imm32 = jump_address(&after_pool);
2527 { BlockGrowBufferScope block_buf_growth(this);
2528 // Buffer growth (and relocation) must be blocked for internal
2529 // references until associated instructions are emitted and available
2530 // to be patched.
2531 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2532 lui(at, (imm32 & kHiMask) >> kLuiShift);
2533 ori(at, at, (imm32 & kImm16Mask));
2534 }
2535 jr(at);
2536 nop();
2537 }
2538 bind(&after_pool);
2539 trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2540
2541 trampoline_emitted_ = true;
2542 // As we are only going to emit trampoline once, we need to prevent any
2543 // further emission.
2544 next_buffer_check_ = kMaxInt;
2545 }
2546 } else {
2547 // Number of branches to unbound label at this point is zero, so we can
2548 // move next buffer check to maximum.
2549 next_buffer_check_ = pc_offset() +
2550 kMaxBranchOffset - kTrampolineSlotsSize * 16;
Steve Block44f0eee2011-05-26 01:26:41 +01002551 }
2552 return;
2553}
2554
2555
Andrei Popescu31002712010-02-23 13:46:05 +00002556Address Assembler::target_address_at(Address pc) {
2557 Instr instr1 = instr_at(pc);
2558 Instr instr2 = instr_at(pc + kInstrSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002559 // Interpret 2 instructions generated by li: lui/ori
2560 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2561 // Assemble the 32 bit value.
Andrei Popescu31002712010-02-23 13:46:05 +00002562 return reinterpret_cast<Address>(
Ben Murdoch257744e2011-11-30 15:57:28 +00002563 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
Andrei Popescu31002712010-02-23 13:46:05 +00002564 }
2565
Ben Murdoch257744e2011-11-30 15:57:28 +00002566 // We should never get here; force a bad address if we do.
Andrei Popescu31002712010-02-23 13:46:05 +00002567 UNREACHABLE();
2568 return (Address)0x0;
2569}
2570
2571
Ben Murdochdb1b4382012-04-26 19:03:50 +01002572// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
2573// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
2574// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
2575// OS::nan_value() returns a qNaN.
2576void Assembler::QuietNaN(HeapObject* object) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002577 HeapNumber::cast(object)->set_value(base::OS::nan_value());
Ben Murdochdb1b4382012-04-26 19:03:50 +01002578}
2579
2580
Ben Murdoch589d6972011-11-30 16:04:58 +00002581// On Mips, a target address is stored in a lui/ori instruction pair, each
2582// of which loads 16 bits of the 32-bit address into a register.
2583// Patching the address must replace both instr, and flush the i-cache.
2584//
2585// There is an optimization below, which emits a nop when the address
2586// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
2587// and possibly removed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002588void Assembler::set_target_address_at(Address pc,
2589 Address target,
2590 ICacheFlushMode icache_flush_mode) {
Andrei Popescu31002712010-02-23 13:46:05 +00002591 Instr instr2 = instr_at(pc + kInstrSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002592 uint32_t rt_code = GetRtField(instr2);
Andrei Popescu31002712010-02-23 13:46:05 +00002593 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2594 uint32_t itarget = reinterpret_cast<uint32_t>(target);
2595
Ben Murdoch589d6972011-11-30 16:04:58 +00002596#ifdef DEBUG
2597 // Check we have the result from a li macro-instruction, using instr pair.
2598 Instr instr1 = instr_at(pc);
2599 CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
2600#endif
2601
2602 // Must use 2 instructions to ensure patchable code => just use lui and ori.
2603 // lui rt, upper-16.
2604 // ori rt, rt, lower-16.
Ben Murdoch257744e2011-11-30 15:57:28 +00002605 *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002606 *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
Andrei Popescu31002712010-02-23 13:46:05 +00002607
Ben Murdoch589d6972011-11-30 16:04:58 +00002608 // The following code is an optimization for the common case of Call()
2609 // or Jump() which is load to register, and jump through register:
2610 // li(t9, address); jalr(t9) (or jr(t9)).
2611 // If the destination address is in the same 256 MB page as the call, it
2612 // is faster to do a direct jal, or j, rather than jump thru register, since
2613 // that lets the cpu pipeline prefetch the target address. However each
2614 // time the address above is patched, we have to patch the direct jal/j
2615 // instruction, as well as possibly revert to jalr/jr if we now cross a
2616 // 256 MB page. Note that with the jal/j instructions, we do not need to
2617 // load the register, but that code is left, since it makes it easy to
2618 // revert this process. A further optimization could try replacing the
2619 // li sequence with nops.
2620 // This optimization can only be applied if the rt-code from instr2 is the
2621 // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
2622 // mips return. Occasionally this lands after an li().
2623
2624 Instr instr3 = instr_at(pc + 2 * kInstrSize);
2625 uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002626 bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
2627 uint32_t target_field =
2628 static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
Ben Murdoch589d6972011-11-30 16:04:58 +00002629 bool patched_jump = false;
2630
2631#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
2632 // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
2633 // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
2634 // apply this workaround for all cores so we don't have to identify the core.
2635 if (in_range) {
2636 // The 24k core E156 bug has some very specific requirements, we only check
2637 // the most simple one: if the address of the delay slot instruction is in
2638 // the first or last 32 KB of the 256 MB segment.
2639 uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2640 uint32_t ipc_segment_addr = ipc & segment_mask;
2641 if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2642 in_range = false;
2643 }
2644#endif
2645
2646 if (IsJalr(instr3)) {
2647 // Try to convert JALR to JAL.
2648 if (in_range && GetRt(instr2) == GetRs(instr3)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002649 *(p + 2) = JAL | target_field;
Ben Murdoch589d6972011-11-30 16:04:58 +00002650 patched_jump = true;
2651 }
2652 } else if (IsJr(instr3)) {
2653 // Try to convert JR to J, skip returns (jr ra).
2654 bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2655 if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002656 *(p + 2) = J | target_field;
Ben Murdoch589d6972011-11-30 16:04:58 +00002657 patched_jump = true;
2658 }
2659 } else if (IsJal(instr3)) {
2660 if (in_range) {
2661 // We are patching an already converted JAL.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002662 *(p + 2) = JAL | target_field;
Ben Murdoch589d6972011-11-30 16:04:58 +00002663 } else {
2664 // Patch JAL, but out of range, revert to JALR.
2665 // JALR rs reg is the rt reg specified in the ORI instruction.
2666 uint32_t rs_field = GetRt(instr2) << kRsShift;
2667 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
2668 *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2669 }
2670 patched_jump = true;
2671 } else if (IsJ(instr3)) {
2672 if (in_range) {
2673 // We are patching an already converted J (jump).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002674 *(p + 2) = J | target_field;
Ben Murdoch589d6972011-11-30 16:04:58 +00002675 } else {
2676 // Trying patch J, but out of range, just go back to JR.
2677 // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2678 uint32_t rs_field = GetRt(instr2) << kRsShift;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002679 if (IsMipsArchVariant(kMips32r6)) {
2680 *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
2681 } else {
2682 *(p + 2) = SPECIAL | rs_field | JR;
2683 }
Ben Murdoch589d6972011-11-30 16:04:58 +00002684 }
2685 patched_jump = true;
2686 }
2687
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002688 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
2689 CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
2690 }
Andrei Popescu31002712010-02-23 13:46:05 +00002691}
2692
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002693
Ben Murdoch589d6972011-11-30 16:04:58 +00002694void Assembler::JumpLabelToJumpRegister(Address pc) {
2695 // Address pc points to lui/ori instructions.
2696 // Jump to label may follow at pc + 2 * kInstrSize.
2697 uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2698#ifdef DEBUG
2699 Instr instr1 = instr_at(pc);
2700#endif
2701 Instr instr2 = instr_at(pc + 1 * kInstrSize);
2702 Instr instr3 = instr_at(pc + 2 * kInstrSize);
2703 bool patched = false;
2704
2705 if (IsJal(instr3)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002706 DCHECK(GetOpcodeField(instr1) == LUI);
2707 DCHECK(GetOpcodeField(instr2) == ORI);
Ben Murdoch589d6972011-11-30 16:04:58 +00002708
2709 uint32_t rs_field = GetRt(instr2) << kRsShift;
2710 uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002711 *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
Ben Murdoch589d6972011-11-30 16:04:58 +00002712 patched = true;
2713 } else if (IsJ(instr3)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002714 DCHECK(GetOpcodeField(instr1) == LUI);
2715 DCHECK(GetOpcodeField(instr2) == ORI);
Ben Murdoch589d6972011-11-30 16:04:58 +00002716
2717 uint32_t rs_field = GetRt(instr2) << kRsShift;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002718 if (IsMipsArchVariant(kMips32r6)) {
2719 *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
2720 } else {
2721 *(p + 2) = SPECIAL | rs_field | JR;
2722 }
Ben Murdoch589d6972011-11-30 16:04:58 +00002723 patched = true;
2724 }
2725
2726 if (patched) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002727 CpuFeatures::FlushICache(pc + 2, sizeof(Address));
Ben Murdoch589d6972011-11-30 16:04:58 +00002728 }
2729}
Andrei Popescu31002712010-02-23 13:46:05 +00002730
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002731
2732Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2733 // No out-of-line constant pool support.
2734 DCHECK(!FLAG_enable_ool_constant_pool);
2735 return isolate->factory()->empty_constant_pool_array();
2736}
2737
2738
2739void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2740 // No out-of-line constant pool support.
2741 DCHECK(!FLAG_enable_ool_constant_pool);
2742 return;
2743}
2744
2745
Andrei Popescu31002712010-02-23 13:46:05 +00002746} } // namespace v8::internal
2747
Leon Clarkef7060e22010-06-03 12:02:55 +01002748#endif // V8_TARGET_ARCH_MIPS