// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.

35
36#include "v8.h"
Leon Clarkef7060e22010-06-03 12:02:55 +010037
38#if defined(V8_TARGET_ARCH_MIPS)
39
Andrei Popescu31002712010-02-23 13:46:05 +000040#include "mips/assembler-mips-inl.h"
41#include "serialize.h"
42
Andrei Popescu31002712010-02-23 13:46:05 +000043namespace v8 {
44namespace internal {
45
Ben Murdoch257744e2011-11-30 15:57:28 +000046#ifdef DEBUG
47bool CpuFeatures::initialized_ = false;
48#endif
49unsigned CpuFeatures::supported_ = 0;
50unsigned CpuFeatures::found_by_runtime_probing_ = 0;
Andrei Popescu31002712010-02-23 13:46:05 +000051
Ben Murdoch589d6972011-11-30 16:04:58 +000052
53// Get the CPU features enabled by the build. For cross compilation the
54// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
55// can be defined to enable FPU instructions when building the
56// snapshot.
57static uint64_t CpuFeaturesImpliedByCompiler() {
58 uint64_t answer = 0;
59#ifdef CAN_USE_FPU_INSTRUCTIONS
60 answer |= 1u << FPU;
61#endif // def CAN_USE_FPU_INSTRUCTIONS
62
63#ifdef __mips__
64 // If the compiler is allowed to use FPU then we can use FPU too in our code
65 // generation even when generating snapshots. This won't work for cross
66 // compilation.
67#if(defined(__mips_hard_float) && __mips_hard_float != 0)
68 answer |= 1u << FPU;
69#endif // defined(__mips_hard_float) && __mips_hard_float != 0
70#endif // def __mips__
71
72 return answer;
73}
74
75
Ben Murdoch257744e2011-11-30 15:57:28 +000076void CpuFeatures::Probe() {
77 ASSERT(!initialized_);
78#ifdef DEBUG
79 initialized_ = true;
80#endif
Ben Murdoch589d6972011-11-30 16:04:58 +000081
82 // Get the features implied by the OS and the compiler settings. This is the
83 // minimal set of features which is also allowed for generated code in the
84 // snapshot.
85 supported_ |= OS::CpuFeaturesImpliedByPlatform();
86 supported_ |= CpuFeaturesImpliedByCompiler();
87
88 if (Serializer::enabled()) {
89 // No probing for features if we might serialize (generate snapshot).
90 return;
91 }
92
Steve Block44f0eee2011-05-26 01:26:41 +010093 // If the compiler is allowed to use fpu then we can use fpu too in our
94 // code generation.
95#if !defined(__mips__)
96 // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
97 if (FLAG_enable_fpu) {
98 supported_ |= 1u << FPU;
99 }
100#else
Ben Murdoch589d6972011-11-30 16:04:58 +0000101 // Probe for additional features not already known to be available.
Steve Block44f0eee2011-05-26 01:26:41 +0100102 if (OS::MipsCpuHasFeature(FPU)) {
103 // This implementation also sets the FPU flags if
104 // runtime detection of FPU returns true.
105 supported_ |= 1u << FPU;
106 found_by_runtime_probing_ |= 1u << FPU;
107 }
Steve Block44f0eee2011-05-26 01:26:41 +0100108#endif
109}
Andrei Popescu31002712010-02-23 13:46:05 +0000110
111
Andrei Popescu31002712010-02-23 13:46:05 +0000112int ToNumber(Register reg) {
113 ASSERT(reg.is_valid());
114 const int kNumbers[] = {
115 0, // zero_reg
116 1, // at
117 2, // v0
118 3, // v1
119 4, // a0
120 5, // a1
121 6, // a2
122 7, // a3
123 8, // t0
124 9, // t1
125 10, // t2
126 11, // t3
127 12, // t4
128 13, // t5
129 14, // t6
130 15, // t7
131 16, // s0
132 17, // s1
133 18, // s2
134 19, // s3
135 20, // s4
136 21, // s5
137 22, // s6
138 23, // s7
139 24, // t8
140 25, // t9
141 26, // k0
142 27, // k1
143 28, // gp
144 29, // sp
145 30, // s8_fp
146 31, // ra
147 };
148 return kNumbers[reg.code()];
149}
150
Steve Block44f0eee2011-05-26 01:26:41 +0100151
Andrei Popescu31002712010-02-23 13:46:05 +0000152Register ToRegister(int num) {
153 ASSERT(num >= 0 && num < kNumRegisters);
154 const Register kRegisters[] = {
155 zero_reg,
156 at,
157 v0, v1,
158 a0, a1, a2, a3,
159 t0, t1, t2, t3, t4, t5, t6, t7,
160 s0, s1, s2, s3, s4, s5, s6, s7,
161 t8, t9,
162 k0, k1,
163 gp,
164 sp,
165 s8_fp,
166 ra
167 };
168 return kRegisters[num];
169}
170
171
172// -----------------------------------------------------------------------------
173// Implementation of RelocInfo.
174
Ben Murdoch589d6972011-11-30 16:04:58 +0000175const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
176 1 << RelocInfo::INTERNAL_REFERENCE;
Andrei Popescu31002712010-02-23 13:46:05 +0000177
Steve Block44f0eee2011-05-26 01:26:41 +0100178
179bool RelocInfo::IsCodedSpecially() {
180 // The deserializer needs to know whether a pointer is specially coded. Being
181 // specially coded on MIPS means that it is a lui/ori instruction, and that is
182 // always the case inside code objects.
183 return true;
184}
185
186
Andrei Popescu31002712010-02-23 13:46:05 +0000187// Patch the code at the current address with the supplied instructions.
188void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
189 Instr* pc = reinterpret_cast<Instr*>(pc_);
190 Instr* instr = reinterpret_cast<Instr*>(instructions);
191 for (int i = 0; i < instruction_count; i++) {
192 *(pc + i) = *(instr + i);
193 }
194
195 // Indicate that code has changed.
196 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
197}
198
199
200// Patch the code at the current PC with a call to the target address.
201// Additional guard instructions can be added if required.
202void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
203 // Patch the code at the current address with a call to the target.
204 UNIMPLEMENTED_MIPS();
205}
206
207
208// -----------------------------------------------------------------------------
209// Implementation of Operand and MemOperand.
210// See assembler-mips-inl.h for inlined constructors.
211
212Operand::Operand(Handle<Object> handle) {
213 rm_ = no_reg;
214 // Verify all Objects referred by code are NOT in new space.
215 Object* obj = *handle;
Steve Block44f0eee2011-05-26 01:26:41 +0100216 ASSERT(!HEAP->InNewSpace(obj));
Andrei Popescu31002712010-02-23 13:46:05 +0000217 if (obj->IsHeapObject()) {
218 imm32_ = reinterpret_cast<intptr_t>(handle.location());
219 rmode_ = RelocInfo::EMBEDDED_OBJECT;
220 } else {
221 // No relocation needed.
222 imm32_ = reinterpret_cast<intptr_t>(obj);
223 rmode_ = RelocInfo::NONE;
224 }
225}
226
Steve Block44f0eee2011-05-26 01:26:41 +0100227
228MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
Andrei Popescu31002712010-02-23 13:46:05 +0000229 offset_ = offset;
230}
231
232
233// -----------------------------------------------------------------------------
Steve Block44f0eee2011-05-26 01:26:41 +0100234// Specific instructions, constants, and masks.
Andrei Popescu31002712010-02-23 13:46:05 +0000235
Steve Block44f0eee2011-05-26 01:26:41 +0100236static const int kNegOffset = 0x00008000;
237// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
238// operations as post-increment of sp.
239const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
240 | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
241// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
242const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
243 | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
244// sw(r, MemOperand(sp, 0))
245const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
246 | (0 & kImm16Mask);
247// lw(r, MemOperand(sp, 0))
248const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
249 | (0 & kImm16Mask);
Andrei Popescu31002712010-02-23 13:46:05 +0000250
Steve Block44f0eee2011-05-26 01:26:41 +0100251const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
252 | (0 & kImm16Mask);
253
254const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
255 | (0 & kImm16Mask);
256
257const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
258 | (kNegOffset & kImm16Mask);
259
260const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
261 | (kNegOffset & kImm16Mask);
262// A mask for the Rt register for push, pop, lw, sw instructions.
263const Instr kRtMask = kRtFieldMask;
264const Instr kLwSwInstrTypeMask = 0xffe00000;
265const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
266const Instr kLwSwOffsetMask = kImm16Mask;
267
268
269// Spare buffer.
270static const int kMinimalBufferSize = 4 * KB;
271
272
Ben Murdoch257744e2011-11-30 15:57:28 +0000273Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
274 : AssemblerBase(arg_isolate),
Steve Block44f0eee2011-05-26 01:26:41 +0100275 positions_recorder_(this),
Ben Murdoch257744e2011-11-30 15:57:28 +0000276 emit_debug_code_(FLAG_debug_code) {
Andrei Popescu31002712010-02-23 13:46:05 +0000277 if (buffer == NULL) {
278 // Do our own buffer management.
279 if (buffer_size <= kMinimalBufferSize) {
280 buffer_size = kMinimalBufferSize;
281
Steve Block44f0eee2011-05-26 01:26:41 +0100282 if (isolate()->assembler_spare_buffer() != NULL) {
283 buffer = isolate()->assembler_spare_buffer();
284 isolate()->set_assembler_spare_buffer(NULL);
Andrei Popescu31002712010-02-23 13:46:05 +0000285 }
286 }
287 if (buffer == NULL) {
288 buffer_ = NewArray<byte>(buffer_size);
289 } else {
290 buffer_ = static_cast<byte*>(buffer);
291 }
292 buffer_size_ = buffer_size;
293 own_buffer_ = true;
294
295 } else {
296 // Use externally provided buffer instead.
297 ASSERT(buffer_size > 0);
298 buffer_ = static_cast<byte*>(buffer);
299 buffer_size_ = buffer_size;
300 own_buffer_ = false;
301 }
302
303 // Setup buffer pointers.
304 ASSERT(buffer_ != NULL);
305 pc_ = buffer_;
306 reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
Steve Block44f0eee2011-05-26 01:26:41 +0100307
308 last_trampoline_pool_end_ = 0;
309 no_trampoline_pool_before_ = 0;
310 trampoline_pool_blocked_nesting_ = 0;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000311 // We leave space (16 * kTrampolineSlotsSize)
312 // for BlockTrampolinePoolScope buffer.
313 next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
Ben Murdoch257744e2011-11-30 15:57:28 +0000314 internal_trampoline_exception_ = false;
315 last_bound_pos_ = 0;
316
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000317 trampoline_emitted_ = false;
318 unbound_labels_count_ = 0;
319 block_buffer_growth_ = false;
320
321 ClearRecordedAstId();
Andrei Popescu31002712010-02-23 13:46:05 +0000322}
323
324
325Assembler::~Assembler() {
326 if (own_buffer_) {
Steve Block44f0eee2011-05-26 01:26:41 +0100327 if (isolate()->assembler_spare_buffer() == NULL &&
Ben Murdoch257744e2011-11-30 15:57:28 +0000328 buffer_size_ == kMinimalBufferSize) {
Steve Block44f0eee2011-05-26 01:26:41 +0100329 isolate()->set_assembler_spare_buffer(buffer_);
Andrei Popescu31002712010-02-23 13:46:05 +0000330 } else {
331 DeleteArray(buffer_);
332 }
333 }
334}
335
336
337void Assembler::GetCode(CodeDesc* desc) {
Steve Block44f0eee2011-05-26 01:26:41 +0100338 ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
Andrei Popescu31002712010-02-23 13:46:05 +0000339 // Setup code descriptor.
340 desc->buffer = buffer_;
341 desc->buffer_size = buffer_size_;
342 desc->instr_size = pc_offset();
343 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
344}
345
346
Steve Block44f0eee2011-05-26 01:26:41 +0100347void Assembler::Align(int m) {
348 ASSERT(m >= 4 && IsPowerOf2(m));
349 while ((pc_offset() & (m - 1)) != 0) {
350 nop();
351 }
352}
353
354
355void Assembler::CodeTargetAlign() {
356 // No advantage to aligning branch/call targets to more than
357 // single instruction, that I am aware of.
358 Align(4);
359}
360
361
Ben Murdoch257744e2011-11-30 15:57:28 +0000362Register Assembler::GetRtReg(Instr instr) {
Steve Block44f0eee2011-05-26 01:26:41 +0100363 Register rt;
Ben Murdoch257744e2011-11-30 15:57:28 +0000364 rt.code_ = (instr & kRtFieldMask) >> kRtShift;
Steve Block44f0eee2011-05-26 01:26:41 +0100365 return rt;
366}
367
368
Ben Murdoch257744e2011-11-30 15:57:28 +0000369Register Assembler::GetRsReg(Instr instr) {
370 Register rs;
371 rs.code_ = (instr & kRsFieldMask) >> kRsShift;
372 return rs;
373}
374
375
376Register Assembler::GetRdReg(Instr instr) {
377 Register rd;
378 rd.code_ = (instr & kRdFieldMask) >> kRdShift;
379 return rd;
380}
381
382
383uint32_t Assembler::GetRt(Instr instr) {
384 return (instr & kRtFieldMask) >> kRtShift;
385}
386
387
388uint32_t Assembler::GetRtField(Instr instr) {
389 return instr & kRtFieldMask;
390}
391
392
393uint32_t Assembler::GetRs(Instr instr) {
394 return (instr & kRsFieldMask) >> kRsShift;
395}
396
397
398uint32_t Assembler::GetRsField(Instr instr) {
399 return instr & kRsFieldMask;
400}
401
402
403uint32_t Assembler::GetRd(Instr instr) {
404 return (instr & kRdFieldMask) >> kRdShift;
405}
406
407
408uint32_t Assembler::GetRdField(Instr instr) {
409 return instr & kRdFieldMask;
410}
411
412
413uint32_t Assembler::GetSa(Instr instr) {
414 return (instr & kSaFieldMask) >> kSaShift;
415}
416
417
418uint32_t Assembler::GetSaField(Instr instr) {
419 return instr & kSaFieldMask;
420}
421
422
423uint32_t Assembler::GetOpcodeField(Instr instr) {
424 return instr & kOpcodeMask;
425}
426
427
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000428uint32_t Assembler::GetFunction(Instr instr) {
429 return (instr & kFunctionFieldMask) >> kFunctionShift;
430}
431
432
433uint32_t Assembler::GetFunctionField(Instr instr) {
434 return instr & kFunctionFieldMask;
435}
436
437
Ben Murdoch257744e2011-11-30 15:57:28 +0000438uint32_t Assembler::GetImmediate16(Instr instr) {
439 return instr & kImm16Mask;
440}
441
442
443uint32_t Assembler::GetLabelConst(Instr instr) {
444 return instr & ~kImm16Mask;
445}
446
447
Steve Block44f0eee2011-05-26 01:26:41 +0100448bool Assembler::IsPop(Instr instr) {
449 return (instr & ~kRtMask) == kPopRegPattern;
450}
451
452
453bool Assembler::IsPush(Instr instr) {
454 return (instr & ~kRtMask) == kPushRegPattern;
455}
456
457
458bool Assembler::IsSwRegFpOffset(Instr instr) {
459 return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
460}
461
462
463bool Assembler::IsLwRegFpOffset(Instr instr) {
464 return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
465}
466
467
468bool Assembler::IsSwRegFpNegOffset(Instr instr) {
469 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
470 kSwRegFpNegOffsetPattern);
471}
472
473
474bool Assembler::IsLwRegFpNegOffset(Instr instr) {
475 return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
476 kLwRegFpNegOffsetPattern);
477}
478
479
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction 16-bit offset field addresses 32-bit words, but in
// code is conv to an 18-bit value addressing bytes, hence the -4 value.
Andrei Popescu31002712010-02-23 13:46:05 +0000494
Andrei Popescu31002712010-02-23 13:46:05 +0000495const int kEndOfChain = -4;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000496// Determines the end of the Jump chain (a subset of the label link chain).
497const int kEndOfJumpChain = 0;
Andrei Popescu31002712010-02-23 13:46:05 +0000498
Steve Block44f0eee2011-05-26 01:26:41 +0100499
500bool Assembler::IsBranch(Instr instr) {
Ben Murdoch257744e2011-11-30 15:57:28 +0000501 uint32_t opcode = GetOpcodeField(instr);
502 uint32_t rt_field = GetRtField(instr);
503 uint32_t rs_field = GetRsField(instr);
504 uint32_t label_constant = GetLabelConst(instr);
Andrei Popescu31002712010-02-23 13:46:05 +0000505 // Checks if the instruction is a branch.
506 return opcode == BEQ ||
507 opcode == BNE ||
508 opcode == BLEZ ||
509 opcode == BGTZ ||
510 opcode == BEQL ||
511 opcode == BNEL ||
512 opcode == BLEZL ||
Ben Murdoch257744e2011-11-30 15:57:28 +0000513 opcode == BGTZL ||
Andrei Popescu31002712010-02-23 13:46:05 +0000514 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
515 rt_field == BLTZAL || rt_field == BGEZAL)) ||
Steve Block44f0eee2011-05-26 01:26:41 +0100516 (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
517 label_constant == 0; // Emitted label const in reg-exp engine.
518}
519
520
Ben Murdoch257744e2011-11-30 15:57:28 +0000521bool Assembler::IsBeq(Instr instr) {
522 return GetOpcodeField(instr) == BEQ;
523}
524
525
526bool Assembler::IsBne(Instr instr) {
527 return GetOpcodeField(instr) == BNE;
528}
529
530
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000531bool Assembler::IsJump(Instr instr) {
532 uint32_t opcode = GetOpcodeField(instr);
533 uint32_t rt_field = GetRtField(instr);
534 uint32_t rd_field = GetRdField(instr);
535 uint32_t function_field = GetFunctionField(instr);
536 // Checks if the instruction is a jump.
537 return opcode == J || opcode == JAL ||
538 (opcode == SPECIAL && rt_field == 0 &&
539 ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
540}
541
542
543bool Assembler::IsJ(Instr instr) {
544 uint32_t opcode = GetOpcodeField(instr);
545 // Checks if the instruction is a jump.
546 return opcode == J;
547}
548
549
Ben Murdoch589d6972011-11-30 16:04:58 +0000550bool Assembler::IsJal(Instr instr) {
551 return GetOpcodeField(instr) == JAL;
552}
553
554bool Assembler::IsJr(Instr instr) {
555 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
556}
557
558bool Assembler::IsJalr(Instr instr) {
559 return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
560}
561
562
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000563bool Assembler::IsLui(Instr instr) {
564 uint32_t opcode = GetOpcodeField(instr);
565 // Checks if the instruction is a load upper immediate.
566 return opcode == LUI;
567}
568
569
570bool Assembler::IsOri(Instr instr) {
571 uint32_t opcode = GetOpcodeField(instr);
572 // Checks if the instruction is a load upper immediate.
573 return opcode == ORI;
574}
575
576
Steve Block44f0eee2011-05-26 01:26:41 +0100577bool Assembler::IsNop(Instr instr, unsigned int type) {
578 // See Assembler::nop(type).
579 ASSERT(type < 32);
Ben Murdoch257744e2011-11-30 15:57:28 +0000580 uint32_t opcode = GetOpcodeField(instr);
581 uint32_t rt = GetRt(instr);
582 uint32_t rs = GetRs(instr);
583 uint32_t sa = GetSa(instr);
Steve Block44f0eee2011-05-26 01:26:41 +0100584
585 // nop(type) == sll(zero_reg, zero_reg, type);
586 // Technically all these values will be 0 but
587 // this makes more sense to the reader.
588
589 bool ret = (opcode == SLL &&
590 rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
591 rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
592 sa == type);
593
594 return ret;
595}
596
597
598int32_t Assembler::GetBranchOffset(Instr instr) {
599 ASSERT(IsBranch(instr));
600 return ((int16_t)(instr & kImm16Mask)) << 2;
601}
602
603
604bool Assembler::IsLw(Instr instr) {
605 return ((instr & kOpcodeMask) == LW);
606}
607
608
609int16_t Assembler::GetLwOffset(Instr instr) {
610 ASSERT(IsLw(instr));
611 return ((instr & kImm16Mask));
612}
613
614
615Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
616 ASSERT(IsLw(instr));
617
618 // We actually create a new lw instruction based on the original one.
619 Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
620 | (offset & kImm16Mask);
621
622 return temp_instr;
623}
624
625
626bool Assembler::IsSw(Instr instr) {
627 return ((instr & kOpcodeMask) == SW);
628}
629
630
631Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
632 ASSERT(IsSw(instr));
633 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
634}
635
636
637bool Assembler::IsAddImmediate(Instr instr) {
638 return ((instr & kOpcodeMask) == ADDIU);
639}
640
641
642Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
643 ASSERT(IsAddImmediate(instr));
644 return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
Andrei Popescu31002712010-02-23 13:46:05 +0000645}
646
647
Ben Murdoch257744e2011-11-30 15:57:28 +0000648bool Assembler::IsAndImmediate(Instr instr) {
649 return GetOpcodeField(instr) == ANDI;
650}
651
652
Andrei Popescu31002712010-02-23 13:46:05 +0000653int Assembler::target_at(int32_t pos) {
654 Instr instr = instr_at(pos);
655 if ((instr & ~kImm16Mask) == 0) {
656 // Emitted label constant, not part of a branch.
Steve Block44f0eee2011-05-26 01:26:41 +0100657 if (instr == 0) {
658 return kEndOfChain;
659 } else {
660 int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
661 return (imm18 + pos);
662 }
Andrei Popescu31002712010-02-23 13:46:05 +0000663 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000664 // Check we have a branch or jump instruction.
665 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
Andrei Popescu31002712010-02-23 13:46:05 +0000666 // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
667 // the compiler uses arithmectic shifts for signed integers.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000668 if (IsBranch(instr)) {
669 int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
Andrei Popescu31002712010-02-23 13:46:05 +0000670
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000671 if (imm18 == kEndOfChain) {
672 // EndOfChain sentinel is returned directly, not relative to pc or pos.
673 return kEndOfChain;
674 } else {
675 return pos + kBranchPCOffset + imm18;
676 }
677 } else if (IsLui(instr)) {
678 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
679 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
680 ASSERT(IsOri(instr_ori));
681 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
682 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
683
684 if (imm == kEndOfJumpChain) {
685 // EndOfChain sentinel is returned directly, not relative to pc or pos.
686 return kEndOfChain;
687 } else {
688 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
689 int32_t delta = instr_address - imm;
690 ASSERT(pos > delta);
691 return pos - delta;
692 }
Steve Block44f0eee2011-05-26 01:26:41 +0100693 } else {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000694 int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
695 if (imm28 == kEndOfJumpChain) {
696 // EndOfChain sentinel is returned directly, not relative to pc or pos.
697 return kEndOfChain;
698 } else {
699 uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
700 instr_address &= kImm28Mask;
701 int32_t delta = instr_address - imm28;
702 ASSERT(pos > delta);
703 return pos - delta;
704 }
Steve Block44f0eee2011-05-26 01:26:41 +0100705 }
Andrei Popescu31002712010-02-23 13:46:05 +0000706}
707
708
709void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
710 Instr instr = instr_at(pos);
711 if ((instr & ~kImm16Mask) == 0) {
712 ASSERT(target_pos == kEndOfChain || target_pos >= 0);
713 // Emitted label constant, not part of a branch.
714 // Make label relative to Code* of generated Code object.
715 instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
716 return;
717 }
718
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000719 ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
720 if (IsBranch(instr)) {
721 int32_t imm18 = target_pos - (pos + kBranchPCOffset);
722 ASSERT((imm18 & 3) == 0);
Andrei Popescu31002712010-02-23 13:46:05 +0000723
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000724 instr &= ~kImm16Mask;
725 int32_t imm16 = imm18 >> 2;
726 ASSERT(is_int16(imm16));
Andrei Popescu31002712010-02-23 13:46:05 +0000727
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000728 instr_at_put(pos, instr | (imm16 & kImm16Mask));
729 } else if (IsLui(instr)) {
730 Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
731 Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
732 ASSERT(IsOri(instr_ori));
733 uint32_t imm = (uint32_t)buffer_ + target_pos;
734 ASSERT((imm & 3) == 0);
735
736 instr_lui &= ~kImm16Mask;
737 instr_ori &= ~kImm16Mask;
738
739 instr_at_put(pos + 0 * Assembler::kInstrSize,
740 instr_lui | ((imm & kHiMask) >> kLuiShift));
741 instr_at_put(pos + 1 * Assembler::kInstrSize,
742 instr_ori | (imm & kImm16Mask));
743 } else {
744 uint32_t imm28 = (uint32_t)buffer_ + target_pos;
745 imm28 &= kImm28Mask;
746 ASSERT((imm28 & 3) == 0);
747
748 instr &= ~kImm26Mask;
749 uint32_t imm26 = imm28 >> 2;
750 ASSERT(is_uint26(imm26));
751
752 instr_at_put(pos, instr | (imm26 & kImm26Mask));
753 }
Andrei Popescu31002712010-02-23 13:46:05 +0000754}
755
756
757void Assembler::print(Label* L) {
758 if (L->is_unused()) {
759 PrintF("unused label\n");
760 } else if (L->is_bound()) {
761 PrintF("bound label to %d\n", L->pos());
762 } else if (L->is_linked()) {
763 Label l = *L;
764 PrintF("unbound label");
765 while (l.is_linked()) {
766 PrintF("@ %d ", l.pos());
767 Instr instr = instr_at(l.pos());
768 if ((instr & ~kImm16Mask) == 0) {
769 PrintF("value\n");
770 } else {
771 PrintF("%d\n", instr);
772 }
773 next(&l);
774 }
775 } else {
776 PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
777 }
778}
779
780
781void Assembler::bind_to(Label* L, int pos) {
Steve Block44f0eee2011-05-26 01:26:41 +0100782 ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000783 int32_t trampoline_pos = kInvalidSlotPos;
784 if (L->is_linked() && !trampoline_emitted_) {
785 unbound_labels_count_--;
786 next_buffer_check_ += kTrampolineSlotsSize;
787 }
788
Andrei Popescu31002712010-02-23 13:46:05 +0000789 while (L->is_linked()) {
790 int32_t fixup_pos = L->pos();
Steve Block44f0eee2011-05-26 01:26:41 +0100791 int32_t dist = pos - fixup_pos;
792 next(L); // Call next before overwriting link with target at fixup_pos.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000793 Instr instr = instr_at(fixup_pos);
794 if (IsBranch(instr)) {
795 if (dist > kMaxBranchOffset) {
796 if (trampoline_pos == kInvalidSlotPos) {
797 trampoline_pos = get_trampoline_entry(fixup_pos);
798 CHECK(trampoline_pos != kInvalidSlotPos);
Ben Murdoch257744e2011-11-30 15:57:28 +0000799 }
Steve Block44f0eee2011-05-26 01:26:41 +0100800 ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
801 target_at_put(fixup_pos, trampoline_pos);
802 fixup_pos = trampoline_pos;
803 dist = pos - fixup_pos;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000804 }
805 target_at_put(fixup_pos, pos);
806 } else {
807 ASSERT(IsJ(instr) || IsLui(instr));
808 target_at_put(fixup_pos, pos);
809 }
Andrei Popescu31002712010-02-23 13:46:05 +0000810 }
811 L->bind_to(pos);
812
813 // Keep track of the last bound label so we don't eliminate any instructions
814 // before a bound label.
815 if (pos > last_bound_pos_)
816 last_bound_pos_ = pos;
817}
818
819
Andrei Popescu31002712010-02-23 13:46:05 +0000820void Assembler::bind(Label* L) {
Steve Block44f0eee2011-05-26 01:26:41 +0100821 ASSERT(!L->is_bound()); // Label can only be bound once.
Andrei Popescu31002712010-02-23 13:46:05 +0000822 bind_to(L, pc_offset());
823}
824
825
826void Assembler::next(Label* L) {
827 ASSERT(L->is_linked());
828 int link = target_at(L->pos());
Steve Block44f0eee2011-05-26 01:26:41 +0100829 if (link == kEndOfChain) {
Andrei Popescu31002712010-02-23 13:46:05 +0000830 L->Unuse();
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000831 } else {
832 ASSERT(link >= 0);
Steve Block44f0eee2011-05-26 01:26:41 +0100833 L->link_to(link);
Andrei Popescu31002712010-02-23 13:46:05 +0000834 }
835}
836
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000837bool Assembler::is_near(Label* L) {
838 if (L->is_bound()) {
839 return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
840 }
841 return false;
842}
Andrei Popescu31002712010-02-23 13:46:05 +0000843
844// We have to use a temporary register for things that can be relocated even
845// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
846// space. There is no guarantee that the relocated location can be similarly
847// encoded.
Steve Block44f0eee2011-05-26 01:26:41 +0100848bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
849 return rmode != RelocInfo::NONE;
Andrei Popescu31002712010-02-23 13:46:05 +0000850}
851
852
853void Assembler::GenInstrRegister(Opcode opcode,
854 Register rs,
855 Register rt,
856 Register rd,
857 uint16_t sa,
858 SecondaryField func) {
859 ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
860 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
861 | (rd.code() << kRdShift) | (sa << kSaShift) | func;
862 emit(instr);
863}
864
865
866void Assembler::GenInstrRegister(Opcode opcode,
Steve Block44f0eee2011-05-26 01:26:41 +0100867 Register rs,
868 Register rt,
869 uint16_t msb,
870 uint16_t lsb,
871 SecondaryField func) {
872 ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
873 Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
874 | (msb << kRdShift) | (lsb << kSaShift) | func;
875 emit(instr);
876}
877
878
879void Assembler::GenInstrRegister(Opcode opcode,
Andrei Popescu31002712010-02-23 13:46:05 +0000880 SecondaryField fmt,
881 FPURegister ft,
882 FPURegister fs,
883 FPURegister fd,
884 SecondaryField func) {
885 ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
Ben Murdoch257744e2011-11-30 15:57:28 +0000886 ASSERT(CpuFeatures::IsEnabled(FPU));
Steve Block44f0eee2011-05-26 01:26:41 +0100887 Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
888 | (fd.code() << kFdShift) | func;
Andrei Popescu31002712010-02-23 13:46:05 +0000889 emit(instr);
890}
891
892
893void Assembler::GenInstrRegister(Opcode opcode,
894 SecondaryField fmt,
895 Register rt,
896 FPURegister fs,
897 FPURegister fd,
898 SecondaryField func) {
899 ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
Ben Murdoch257744e2011-11-30 15:57:28 +0000900 ASSERT(CpuFeatures::IsEnabled(FPU));
Andrei Popescu31002712010-02-23 13:46:05 +0000901 Instr instr = opcode | fmt | (rt.code() << kRtShift)
Steve Block44f0eee2011-05-26 01:26:41 +0100902 | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
903 emit(instr);
904}
905
906
907void Assembler::GenInstrRegister(Opcode opcode,
908 SecondaryField fmt,
909 Register rt,
910 FPUControlRegister fs,
911 SecondaryField func) {
912 ASSERT(fs.is_valid() && rt.is_valid());
Ben Murdoch257744e2011-11-30 15:57:28 +0000913 ASSERT(CpuFeatures::IsEnabled(FPU));
Steve Block44f0eee2011-05-26 01:26:41 +0100914 Instr instr =
915 opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
Andrei Popescu31002712010-02-23 13:46:05 +0000916 emit(instr);
917}
918
919
920// Instructions with immediate value.
921// Registers are in the order of the instruction encoding, from left to right.
// Emits an I-type (immediate) instruction: opcode | rs | rt | imm16.
// Registers appear in instruction-encoding order, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  // The immediate must be representable in 16 bits, signed or unsigned
  // (different instructions interpret the field differently).
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}
931
932
933void Assembler::GenInstrImmediate(Opcode opcode,
934 Register rs,
935 SecondaryField SF,
936 int32_t j) {
937 ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
938 Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
939 emit(instr);
940}
941
942
943void Assembler::GenInstrImmediate(Opcode opcode,
944 Register rs,
945 FPURegister ft,
946 int32_t j) {
947 ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
Ben Murdoch257744e2011-11-30 15:57:28 +0000948 ASSERT(CpuFeatures::IsEnabled(FPU));
Andrei Popescu31002712010-02-23 13:46:05 +0000949 Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
950 | (j & kImm16Mask);
951 emit(instr);
952}
953
954
// Emits a J-type (jump) instruction: opcode | 26-bit word-address field.
// The trampoline pool is blocked across the jump and its delay slot so the
// pool cannot be emitted between them.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  // The caller has already shifted the byte address right by 2.
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
963
964
// Returns the next free trampoline entry, or kInvalidSlotPos if no slot is
// available (in which case internal_trampoline_exception_ is latched so
// subsequent requests fail fast).
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    // Only hand out a slot when the trampoline pool lies after |pos|, i.e.
    // the slot is reachable forward from the requesting branch.
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    // take_slot() can also return kInvalidSlotPos when the pool is exhausted;
    // remember the failure permanently.
    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}
980
981
// Returns the absolute target address for a jump to label L (buffer base plus
// the label's position). For a not-yet-bound label the current pc is linked
// into the label's chain and kEndOfJumpChain is returned as a sentinel to be
// patched when the label is bound.
uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      // First reference to an unbound label: start the chain here.
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = (uint32_t)buffer_ + target_pos;
  // Jump targets must be word-aligned.
  ASSERT((imm & 3) == 0);

  return imm;
}
1002
1003
// Returns the pc-relative byte offset (from the delay slot) to label L for a
// 16-bit branch. For an unbound label, links the current pc into the label's
// chain, reserves trampoline-pool budget for it, and returns kEndOfChain.
// NOTE(review): |jump_elimination_allowed| is currently unused here.
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        // Each unbound label may later need a trampoline slot; pull the next
        // buffer check forward so the pool is emitted in time.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  // Branch offsets are relative to the address of the delay slot.
  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);           // Word-aligned.
  ASSERT(is_int16(offset >> 2));       // Fits in the 16-bit branch field.

  return offset;
}
1029
1030
// Stores label-position information at |at_offset| in the instruction stream.
// For a bound label the code-object-relative position is written; otherwise
// the link chain is encoded in place (a 16-bit word offset, or 0 for the end
// of a fresh chain) and the label is linked to this location.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    // Bound: store the position biased to be code-object relative.
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      // Encode the delta to the previous link as a word count in imm16.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      // First reference: 0 marks the end of the chain.
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1055
1056
1057//------- Branch and jump instructions --------
1058
1059void Assembler::b(int16_t offset) {
1060 beq(zero_reg, zero_reg, offset);
1061}
1062
1063
1064void Assembler::bal(int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001065 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001066 bgezal(zero_reg, offset);
1067}
1068
1069
1070void Assembler::beq(Register rs, Register rt, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001071 BlockTrampolinePoolScope block_trampoline_pool(this);
Andrei Popescu31002712010-02-23 13:46:05 +00001072 GenInstrImmediate(BEQ, rs, rt, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001073 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001074}
1075
1076
1077void Assembler::bgez(Register rs, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001078 BlockTrampolinePoolScope block_trampoline_pool(this);
Andrei Popescu31002712010-02-23 13:46:05 +00001079 GenInstrImmediate(REGIMM, rs, BGEZ, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001080 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001081}
1082
1083
1084void Assembler::bgezal(Register rs, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001085 BlockTrampolinePoolScope block_trampoline_pool(this);
1086 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001087 GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001088 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001089}
1090
1091
1092void Assembler::bgtz(Register rs, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001093 BlockTrampolinePoolScope block_trampoline_pool(this);
Andrei Popescu31002712010-02-23 13:46:05 +00001094 GenInstrImmediate(BGTZ, rs, zero_reg, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001095 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001096}
1097
1098
1099void Assembler::blez(Register rs, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001100 BlockTrampolinePoolScope block_trampoline_pool(this);
Andrei Popescu31002712010-02-23 13:46:05 +00001101 GenInstrImmediate(BLEZ, rs, zero_reg, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001102 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001103}
1104
1105
1106void Assembler::bltz(Register rs, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001107 BlockTrampolinePoolScope block_trampoline_pool(this);
Andrei Popescu31002712010-02-23 13:46:05 +00001108 GenInstrImmediate(REGIMM, rs, BLTZ, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001109 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001110}
1111
1112
1113void Assembler::bltzal(Register rs, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001114 BlockTrampolinePoolScope block_trampoline_pool(this);
1115 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001116 GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001117 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001118}
1119
1120
1121void Assembler::bne(Register rs, Register rt, int16_t offset) {
Steve Block44f0eee2011-05-26 01:26:41 +01001122 BlockTrampolinePoolScope block_trampoline_pool(this);
Andrei Popescu31002712010-02-23 13:46:05 +00001123 GenInstrImmediate(BNE, rs, rt, offset);
Steve Block44f0eee2011-05-26 01:26:41 +01001124 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001125}
1126
1127
1128void Assembler::j(int32_t target) {
Ben Murdoch589d6972011-11-30 16:04:58 +00001129#if DEBUG
1130 // Get pc of delay slot.
1131 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1132 bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1133 ASSERT(in_range && ((target & 3) == 0));
1134#endif
Andrei Popescu31002712010-02-23 13:46:05 +00001135 GenInstrJump(J, target >> 2);
1136}
1137
1138
1139void Assembler::jr(Register rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001140 BlockTrampolinePoolScope block_trampoline_pool(this);
1141 if (rs.is(ra)) {
1142 positions_recorder()->WriteRecordedPositions();
1143 }
Andrei Popescu31002712010-02-23 13:46:05 +00001144 GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
Steve Block44f0eee2011-05-26 01:26:41 +01001145 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001146}
1147
1148
1149void Assembler::jal(int32_t target) {
Ben Murdoch589d6972011-11-30 16:04:58 +00001150#ifdef DEBUG
1151 // Get pc of delay slot.
1152 uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1153 bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1154 ASSERT(in_range && ((target & 3) == 0));
1155#endif
Steve Block44f0eee2011-05-26 01:26:41 +01001156 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001157 GenInstrJump(JAL, target >> 2);
1158}
1159
1160
1161void Assembler::jalr(Register rs, Register rd) {
Steve Block44f0eee2011-05-26 01:26:41 +01001162 BlockTrampolinePoolScope block_trampoline_pool(this);
1163 positions_recorder()->WriteRecordedPositions();
Andrei Popescu31002712010-02-23 13:46:05 +00001164 GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
Steve Block44f0eee2011-05-26 01:26:41 +01001165 BlockTrampolinePoolFor(1); // For associated delay slot.
Andrei Popescu31002712010-02-23 13:46:05 +00001166}
1167
1168
// Emits either a direct jump (when |target| is in J-range of the delay-slot
// pc) or a register jump.
// NOTE(review): |rs| is unused — the out-of-range path hardcodes jr(t9),
// which assumes the caller has already loaded |target| into t9; confirm at
// call sites.
void Assembler::j_or_jr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  // In range iff the upper (32 - 28) bits of pc and target match.
  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;

  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}
1180
1181
// Emits either a direct jump-and-link (when |target| is in JAL-range of the
// delay-slot pc) or a register jump-and-link.
// NOTE(review): |rs| is unused — the out-of-range path hardcodes jalr(t9),
// which assumes the caller has already loaded |target| into t9; confirm at
// call sites.
void Assembler::jal_or_jalr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  // In range iff the upper (32 - 28) bits of pc and target match.
  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;

  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}
1193
1194
Andrei Popescu31002712010-02-23 13:46:05 +00001195//-------Data-processing-instructions---------
1196
1197// Arithmetic.
1198
Andrei Popescu31002712010-02-23 13:46:05 +00001199void Assembler::addu(Register rd, Register rs, Register rt) {
1200 GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1201}
1202
1203
Andrei Popescu31002712010-02-23 13:46:05 +00001204void Assembler::addiu(Register rd, Register rs, int32_t j) {
1205 GenInstrImmediate(ADDIU, rs, rd, j);
Andrei Popescu31002712010-02-23 13:46:05 +00001206}
1207
1208
1209void Assembler::subu(Register rd, Register rs, Register rt) {
1210 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1211}
1212
1213
1214void Assembler::mul(Register rd, Register rs, Register rt) {
1215 GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1216}
1217
1218
1219void Assembler::mult(Register rs, Register rt) {
1220 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1221}
1222
1223
1224void Assembler::multu(Register rs, Register rt) {
1225 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1226}
1227
1228
1229void Assembler::div(Register rs, Register rt) {
1230 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1231}
1232
1233
1234void Assembler::divu(Register rs, Register rt) {
1235 GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1236}
1237
1238
1239// Logical.
1240
1241void Assembler::and_(Register rd, Register rs, Register rt) {
1242 GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1243}
1244
1245
1246void Assembler::andi(Register rt, Register rs, int32_t j) {
1247 GenInstrImmediate(ANDI, rs, rt, j);
1248}
1249
1250
1251void Assembler::or_(Register rd, Register rs, Register rt) {
1252 GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1253}
1254
1255
1256void Assembler::ori(Register rt, Register rs, int32_t j) {
1257 GenInstrImmediate(ORI, rs, rt, j);
1258}
1259
1260
1261void Assembler::xor_(Register rd, Register rs, Register rt) {
1262 GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1263}
1264
1265
1266void Assembler::xori(Register rt, Register rs, int32_t j) {
1267 GenInstrImmediate(XORI, rs, rt, j);
1268}
1269
1270
1271void Assembler::nor(Register rd, Register rs, Register rt) {
1272 GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1273}
1274
1275
1276// Shifts.
Steve Block44f0eee2011-05-26 01:26:41 +01001277void Assembler::sll(Register rd,
1278 Register rt,
1279 uint16_t sa,
1280 bool coming_from_nop) {
1281 // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1282 // generated using the sll instruction. They must be generated using
1283 // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1284 // instructions.
1285 ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
Andrei Popescu31002712010-02-23 13:46:05 +00001286 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
1287}
1288
1289
1290void Assembler::sllv(Register rd, Register rt, Register rs) {
1291 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1292}
1293
1294
1295void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1296 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
1297}
1298
1299
1300void Assembler::srlv(Register rd, Register rt, Register rs) {
1301 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1302}
1303
1304
1305void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1306 GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
1307}
1308
1309
1310void Assembler::srav(Register rd, Register rt, Register rs) {
1311 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1312}
1313
1314
Steve Block44f0eee2011-05-26 01:26:41 +01001315void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1316 // Should be called via MacroAssembler::Ror.
1317 ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1318 ASSERT(mips32r2);
1319 Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1320 | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1321 emit(instr);
1322}
1323
1324
1325void Assembler::rotrv(Register rd, Register rt, Register rs) {
1326 // Should be called via MacroAssembler::Ror.
1327 ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1328 ASSERT(mips32r2);
1329 Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1330 | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1331 emit(instr);
1332}
1333
1334
Andrei Popescu31002712010-02-23 13:46:05 +00001335//------------Memory-instructions-------------
1336
// Helper for base-reg + offset, when offset is larger than int16.
// Materializes (base + full 32-bit offset) into the scratch register 'at'
// via lui/ori/addu, so callers can then use a 0 displacement off 'at'.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  // 'at' is the destination; the base register must not alias it.
  ASSERT(!src.rm().is(at));
  lui(at, src.offset_ >> kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
1344
1345
Andrei Popescu31002712010-02-23 13:46:05 +00001346void Assembler::lb(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001347 if (is_int16(rs.offset_)) {
1348 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1349 } else { // Offset > 16 bits, use multiple instructions to load.
1350 LoadRegPlusOffsetToAt(rs);
1351 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1352 }
Andrei Popescu31002712010-02-23 13:46:05 +00001353}
1354
1355
1356void Assembler::lbu(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001357 if (is_int16(rs.offset_)) {
1358 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1359 } else { // Offset > 16 bits, use multiple instructions to load.
1360 LoadRegPlusOffsetToAt(rs);
1361 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1362 }
1363}
1364
1365
1366void Assembler::lh(Register rd, const MemOperand& rs) {
1367 if (is_int16(rs.offset_)) {
1368 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1369 } else { // Offset > 16 bits, use multiple instructions to load.
1370 LoadRegPlusOffsetToAt(rs);
1371 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1372 }
1373}
1374
1375
1376void Assembler::lhu(Register rd, const MemOperand& rs) {
1377 if (is_int16(rs.offset_)) {
1378 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1379 } else { // Offset > 16 bits, use multiple instructions to load.
1380 LoadRegPlusOffsetToAt(rs);
1381 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1382 }
Andrei Popescu31002712010-02-23 13:46:05 +00001383}
1384
1385
1386void Assembler::lw(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001387 if (is_int16(rs.offset_)) {
1388 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1389 } else { // Offset > 16 bits, use multiple instructions to load.
1390 LoadRegPlusOffsetToAt(rs);
1391 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1392 }
Steve Block44f0eee2011-05-26 01:26:41 +01001393}
1394
1395
1396void Assembler::lwl(Register rd, const MemOperand& rs) {
1397 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1398}
1399
1400
1401void Assembler::lwr(Register rd, const MemOperand& rs) {
1402 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
Andrei Popescu31002712010-02-23 13:46:05 +00001403}
1404
1405
1406void Assembler::sb(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001407 if (is_int16(rs.offset_)) {
1408 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1409 } else { // Offset > 16 bits, use multiple instructions to store.
1410 LoadRegPlusOffsetToAt(rs);
1411 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1412 }
1413}
1414
1415
1416void Assembler::sh(Register rd, const MemOperand& rs) {
1417 if (is_int16(rs.offset_)) {
1418 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1419 } else { // Offset > 16 bits, use multiple instructions to store.
1420 LoadRegPlusOffsetToAt(rs);
1421 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1422 }
Andrei Popescu31002712010-02-23 13:46:05 +00001423}
1424
1425
1426void Assembler::sw(Register rd, const MemOperand& rs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001427 if (is_int16(rs.offset_)) {
1428 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1429 } else { // Offset > 16 bits, use multiple instructions to store.
1430 LoadRegPlusOffsetToAt(rs);
1431 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1432 }
Steve Block44f0eee2011-05-26 01:26:41 +01001433}
1434
1435
1436void Assembler::swl(Register rd, const MemOperand& rs) {
1437 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1438}
1439
1440
1441void Assembler::swr(Register rd, const MemOperand& rs) {
1442 GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
Andrei Popescu31002712010-02-23 13:46:05 +00001443}
1444
1445
1446void Assembler::lui(Register rd, int32_t j) {
1447 GenInstrImmediate(LUI, zero_reg, rd, j);
1448}
1449
1450
1451//-------------Misc-instructions--------------
1452
1453// Break / Trap instructions.
// Emits a BREAK instruction carrying a 20-bit |code|. Codes in the stop range
// are reserved for stop() (the simulator expects a message pointer after such
// an instruction), so a plain break_ must not use them unless break_as_stop
// is set, and vice versa.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  // The code field is 20 bits wide.
  ASSERT((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  ASSERT((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  // Code occupies bits [25:6] of the BREAK encoding.
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}
1468
1469
// Emits a simulator stop with message |msg| and stop |code|. On real MIPS
// hardware this degrades to a plain break (the message cannot be conveyed);
// under the simulator the message pointer is embedded in the instruction
// stream directly after the break.
void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  // Keep the break and the embedded pointer word together — the pool must
  // not be emitted between them.
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}
1483
1484
Andrei Popescu31002712010-02-23 13:46:05 +00001485void Assembler::tge(Register rs, Register rt, uint16_t code) {
1486 ASSERT(is_uint10(code));
1487 Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1488 | rt.code() << kRtShift | code << 6;
1489 emit(instr);
1490}
1491
1492
1493void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
1494 ASSERT(is_uint10(code));
1495 Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1496 | rt.code() << kRtShift | code << 6;
1497 emit(instr);
1498}
1499
1500
1501void Assembler::tlt(Register rs, Register rt, uint16_t code) {
1502 ASSERT(is_uint10(code));
1503 Instr instr =
1504 SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1505 emit(instr);
1506}
1507
1508
1509void Assembler::tltu(Register rs, Register rt, uint16_t code) {
1510 ASSERT(is_uint10(code));
Steve Block44f0eee2011-05-26 01:26:41 +01001511 Instr instr =
1512 SPECIAL | TLTU | rs.code() << kRsShift
Andrei Popescu31002712010-02-23 13:46:05 +00001513 | rt.code() << kRtShift | code << 6;
1514 emit(instr);
1515}
1516
1517
1518void Assembler::teq(Register rs, Register rt, uint16_t code) {
1519 ASSERT(is_uint10(code));
1520 Instr instr =
1521 SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1522 emit(instr);
1523}
1524
1525
1526void Assembler::tne(Register rs, Register rt, uint16_t code) {
1527 ASSERT(is_uint10(code));
1528 Instr instr =
1529 SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1530 emit(instr);
1531}
1532
1533
1534// Move from HI/LO register.
1535
1536void Assembler::mfhi(Register rd) {
1537 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1538}
1539
1540
1541void Assembler::mflo(Register rd) {
1542 GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1543}
1544
1545
1546// Set on less than instructions.
1547void Assembler::slt(Register rd, Register rs, Register rt) {
1548 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1549}
1550
1551
1552void Assembler::sltu(Register rd, Register rs, Register rt) {
1553 GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1554}
1555
1556
1557void Assembler::slti(Register rt, Register rs, int32_t j) {
1558 GenInstrImmediate(SLTI, rs, rt, j);
1559}
1560
1561
1562void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1563 GenInstrImmediate(SLTIU, rs, rt, j);
1564}
1565
1566
Steve Block44f0eee2011-05-26 01:26:41 +01001567// Conditional move.
1568void Assembler::movz(Register rd, Register rs, Register rt) {
1569 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1570}
1571
1572
1573void Assembler::movn(Register rd, Register rs, Register rt) {
1574 GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1575}
1576
1577
// Conditional move on FPU condition code: rd = rs if FP condition |cc| is
// true. The rt field of MOVCI encodes the condition: bits [4:2] hold cc and
// bit 0 is the tf flag (1 = move-on-true).
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
1583
1584
// Conditional move on FPU condition code: rd = rs if FP condition |cc| is
// false. The rt field of MOVCI encodes the condition: bits [4:2] hold cc and
// bit 0 is the tf flag (0 = move-on-false).
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
1590
1591
1592// Bit twiddling.
1593void Assembler::clz(Register rd, Register rs) {
1594 // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1595 GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1596}
1597
1598
1599void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1600 // Should be called via MacroAssembler::Ins.
1601 // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1602 ASSERT(mips32r2);
1603 GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1604}
1605
1606
1607void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1608 // Should be called via MacroAssembler::Ext.
1609 // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1610 ASSERT(mips32r2);
1611 GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1612}
1613
1614
Andrei Popescu31002712010-02-23 13:46:05 +00001615//--------Coprocessor-instructions----------------
1616
1617// Load, store, move.
1618void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1619 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1620}
1621
1622
1623void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
Steve Block44f0eee2011-05-26 01:26:41 +01001624 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1625 // load to two 32-bit loads.
1626 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1627 FPURegister nextfpreg;
1628 nextfpreg.setcode(fd.code() + 1);
1629 GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
Andrei Popescu31002712010-02-23 13:46:05 +00001630}
1631
1632
1633void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1634 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1635}
1636
1637
1638void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
Steve Block44f0eee2011-05-26 01:26:41 +01001639 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1640 // store to two 32-bit stores.
1641 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1642 FPURegister nextfpreg;
1643 nextfpreg.setcode(fd.code() + 1);
1644 GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
Andrei Popescu31002712010-02-23 13:46:05 +00001645}
1646
1647
Steve Block44f0eee2011-05-26 01:26:41 +01001648void Assembler::mtc1(Register rt, FPURegister fs) {
Andrei Popescu31002712010-02-23 13:46:05 +00001649 GenInstrRegister(COP1, MTC1, rt, fs, f0);
1650}
1651
1652
Steve Block44f0eee2011-05-26 01:26:41 +01001653void Assembler::mfc1(Register rt, FPURegister fs) {
Andrei Popescu31002712010-02-23 13:46:05 +00001654 GenInstrRegister(COP1, MFC1, rt, fs, f0);
1655}
1656
1657
Steve Block44f0eee2011-05-26 01:26:41 +01001658void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1659 GenInstrRegister(COP1, CTC1, rt, fs);
1660}
1661
1662
1663void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1664 GenInstrRegister(COP1, CFC1, rt, fs);
1665}
1666
Ben Murdoch589d6972011-11-30 16:04:58 +00001667void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
1668 uint64_t i;
1669 memcpy(&i, &d, 8);
1670
1671 *lo = i & 0xffffffff;
1672 *hi = i >> 32;
1673}
Steve Block44f0eee2011-05-26 01:26:41 +01001674
1675// Arithmetic.
1676
1677void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1678 GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1679}
1680
1681
1682void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1683 GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1684}
1685
1686
1687void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1688 GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1689}
1690
1691
1692void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1693 GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1694}
1695
1696
1697void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1698 GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1699}
1700
1701
1702void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1703 GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1704}
1705
1706
1707void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1708 GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1709}
1710
1711
1712void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1713 GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
Andrei Popescu31002712010-02-23 13:46:05 +00001714}
1715
1716
1717// Conversions.
1718
1719void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
1720 GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
1721}
1722
1723
1724void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
1725 GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
1726}
1727
1728
Steve Block44f0eee2011-05-26 01:26:41 +01001729void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
1730 GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
1731}
1732
1733
1734void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
1735 GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
1736}
1737
1738
1739void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
1740 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
1741}
1742
1743
1744void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
1745 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
1746}
1747
1748
1749void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
1750 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
1751}
1752
1753
1754void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
1755 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
1756}
1757
1758
1759void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
1760 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1761}
1762
1763
1764void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1765 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1766}
1767
1768
Andrei Popescu31002712010-02-23 13:46:05 +00001769void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
Steve Block44f0eee2011-05-26 01:26:41 +01001770 ASSERT(mips32r2);
Andrei Popescu31002712010-02-23 13:46:05 +00001771 GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1772}
1773
1774
// cvt.l.d fd, fs: convert double fs to a 64-bit integer in fd (MIPS32R2 only).
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
1779
1780
// trunc.l.s fd, fs: convert single fs to a 64-bit integer, rounding toward
// zero (MIPS32R2 only).
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
1785
1786
// trunc.l.d fd, fs: convert double fs to a 64-bit integer, rounding toward
// zero (MIPS32R2 only).
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
1791
1792
1793void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1794 GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1795}
1796
1797
1798void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
1799 GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
1800}
1801
1802
1803void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
1804 GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
1805}
1806
1807
1808void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
1809 GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
1810}
1811
1812
1813void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1814 GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1815}
1816
1817
1818void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1819 GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1820}
1821
1822
// cvt.s.w fd, fs: convert 32-bit integer word fs to single-precision fd.
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
1826
1827
// cvt.s.l fd, fs: convert 64-bit integer fs to single-precision fd
// (MIPS32R2 only).
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
1832
1833
// cvt.s.d fd, fs: convert double-precision fs to single-precision fd.
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
1837
1838
// cvt.d.w fd, fs: convert 32-bit integer word fs to double-precision fd.
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
1842
1843
// cvt.d.l fd, fs: convert 64-bit integer fs to double-precision fd
// (MIPS32R2 only).
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
1848
1849
// cvt.d.s fd, fs: convert single-precision fs to double-precision fd.
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
1853
1854
// Conditions.

// Emit c.cond.fmt: compare fs with ft and set FPU condition flag 'cc'.
// 'fmt' must be a format field already shifted into the rs position
// (checked by the mask assert below); 'cc' is a 3-bit condition-code index.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
                  FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);  // fmt must occupy only the rs field.
  // Layout: opcode | fmt | ft | fs | cc<<8 | 11b<<4 | cond.
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}
1865
1866
// Compare double src1 against src2 (only 0.0 is supported, enforced by the
// assert), setting FPU condition flag 0.
// NOTE: clobbers f14, which is used as a scratch register for the 0.0 value.
void Assembler::fcmp(FPURegister src1, const double src2,
                     FPUCondition cond) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);       // f14 <- integer zero.
  cvt_d_w(f14, f14);         // f14 <- 0.0 as a double.
  c(cond, D, src1, f14, 0);  // Compare; result goes to condition flag 0.
}
1875
1876
// bc1f offset, cc: branch by 'offset' if FPU condition flag 'cc' is false.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  // cc goes in bits 20..18; the tf bit (16) is 0 for branch-on-false.
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}
1883
1884
// bc1t offset, cc: branch by 'offset' if FPU condition flag 'cc' is true.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  // cc goes in bits 20..18; the tf bit (16) is 1 for branch-on-true.
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
1891
1892
// Debugging.

// Record the current pc as a JS return site in the reloc info, flushing any
// pending source positions first.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
1899
1900
// Record the current pc as a debug-break slot in the reloc info, flushing
// any pending source positions first.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
1906
1907
// Record a code comment at the current pc. Only emitted when the
// --code-comments flag is on; 'msg' is stored as a pointer in the reloc data,
// so it must outlive the generated code.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
1914
1915
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001916int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
1917 Instr instr = instr_at(pc);
1918 ASSERT(IsJ(instr) || IsLui(instr));
1919 if (IsLui(instr)) {
1920 Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
1921 Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
1922 ASSERT(IsOri(instr_ori));
1923 int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
1924 imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
1925 if (imm == kEndOfJumpChain) {
1926 return 0; // Number of instructions patched.
1927 }
1928 imm += pc_delta;
1929 ASSERT((imm & 3) == 0);
1930
1931 instr_lui &= ~kImm16Mask;
1932 instr_ori &= ~kImm16Mask;
1933
1934 instr_at_put(pc + 0 * Assembler::kInstrSize,
1935 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
1936 instr_at_put(pc + 1 * Assembler::kInstrSize,
1937 instr_ori | (imm & kImm16Mask));
1938 return 2; // Number of instructions patched.
1939 } else {
1940 uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
1941 if ((int32_t)imm28 == kEndOfJumpChain) {
1942 return 0; // Number of instructions patched.
1943 }
1944 imm28 += pc_delta;
1945 imm28 &= kImm28Mask;
1946 ASSERT((imm28 & 3) == 0);
1947
1948 instr &= ~kImm26Mask;
1949 uint32_t imm26 = imm28 >> 2;
1950 ASSERT(is_uint26(imm26));
1951
1952 instr_at_put(pc, instr | (imm26 & kImm26Mask));
1953 return 1; // Number of instructions patched.
1954 }
1955}
1956
1957
// Grow the code buffer: roughly double it up to 1 MB, then grow by 1 MB
// steps. Copies code and reloc info into the new buffer, fixes up internal
// state, and relocates INTERNAL_REFERENCE entries to the new addresses.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Code grows up from the start of the buffer while reloc
  // info grows down from the end, hence the two separate deltas.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries: internal references embed absolute addresses
  // in the instruction stream and must be patched by pc_delta.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  ASSERT(!overflow());
}
2004
2005
// Emit a raw byte of data at the current pc (no relocation info recorded).
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
2011
2012
// Emit a raw 32-bit word of data at the current pc (no relocation info).
void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
2018
2019
// Record relocation info of mode 'rmode' with optional payload 'data' at the
// current pc. External references are dropped unless the heap will be
// serialized or debug code is being emitted; CODE_TARGET_WITH_ID entries are
// written with (and consume) the recorded AST id instead of 'data'.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
2052
2053
// Block trampoline pool emission for the next 'instructions' instructions.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
2057
2058
// Possibly emit the trampoline pool: one lui/ori + jr slot per branch to a
// still-unbound label. The pool is emitted at most once per Assembler
// (trampoline_emitted_); if emission is currently blocked, reschedule the
// check via next_buffer_check_ instead.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);  // Fall-through code must skip over the pool.
      nop();           // Branch delay slot.

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();  // Jump delay slot.
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
2117
2118
Andrei Popescu31002712010-02-23 13:46:05 +00002119Address Assembler::target_address_at(Address pc) {
2120 Instr instr1 = instr_at(pc);
2121 Instr instr2 = instr_at(pc + kInstrSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002122 // Interpret 2 instructions generated by li: lui/ori
2123 if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2124 // Assemble the 32 bit value.
Andrei Popescu31002712010-02-23 13:46:05 +00002125 return reinterpret_cast<Address>(
Ben Murdoch257744e2011-11-30 15:57:28 +00002126 (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
Andrei Popescu31002712010-02-23 13:46:05 +00002127 }
2128
Ben Murdoch257744e2011-11-30 15:57:28 +00002129 // We should never get here, force a bad address if we do.
Andrei Popescu31002712010-02-23 13:46:05 +00002130 UNREACHABLE();
2131 return (Address)0x0;
2132}
2133
2134
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Address pc, Address target) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);  // Destination reg of the li pair.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to insure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // The following code is an optimization for the common case of Call()
  // or Jump() which is load to register, and jump through register:
  // li(t9, address); jalr(t9) (or jr(t9)).
  // If the destination address is in the same 256 MB page as the call, it
  // is faster to do a direct jal, or j, rather than jump thru register, since
  // that lets the cpu pipeline prefetch the target address. However each
  // time the address above is patched, we have to patch the direct jal/j
  // instruction, as well as possibly revert to jalr/jr if we now cross a
  // 256 MB page. Note that with the jal/j instructions, we do not need to
  // load the register, but that code is left, since it makes it easy to
  // revert this process. A further optimization could try replacing the
  // li sequence with nops.
  // This optimization can only be applied if the rt-code from instr2 is the
  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
  // mips return. Occasionally this lands after an li().

  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
  // In range <=> delay-slot pc and target share the same 256 MB segment.
  bool in_range =
      ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
  uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
  // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
  // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
  // apply this workaround for all cores so we don't have to identify the core.
  if (in_range) {
    // The 24k core E156 bug has some very specific requirements, we only check
    // the most simple one: if the address of the delay slot instruction is in
    // the first or last 32 KB of the 256 MB segment.
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
      in_range = false;
  }
#endif

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J, skip returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
      *(p+2) = JAL | target_field;
    } else {
      // Patch JAL, but out of range, revert to JALR.
      // JALR rs reg is the rt reg specified in the ORI instruction.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
      *(p+2) = J | target_field;
    } else {
      // Trying patch J, but out of range, just go back to JR.
      // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      *(p+2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  // Flush 2 or 3 words depending on whether the jump was also patched.
  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}
2238
// Convert a direct jump (jal/j) following the lui/ori pair at 'pc' back into
// a jump-through-register (jalr/jr) using the register loaded by the pair.
// Flushes the i-cache for the patched word.
void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 2 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    // jal -> jalr: rs is the register the li pair loads; rd is ra.
    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    // j -> jr through the register the li pair loads.
    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p+2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    CPU::FlushICache(pc+2, sizeof(Address));
  }
}
Andrei Popescu31002712010-02-23 13:46:05 +00002271
2272} } // namespace v8::internal
2273
Leon Clarkef7060e22010-06-03 12:02:55 +01002274#endif // V8_TARGET_ARCH_MIPS