blob: d9617dc7d34d4361a959e774e1e1bfebedbd54c8 [file] [log] [blame]
Andrei Popescu31002712010-02-23 13:46:05 +00001// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// The original source code covered by the above license above has been
32// modified significantly by Google Inc.
33// Copyright 2010 the V8 project authors. All rights reserved.
34
35
36#include "v8.h"
Leon Clarkef7060e22010-06-03 12:02:55 +010037
38#if defined(V8_TARGET_ARCH_MIPS)
39
Andrei Popescu31002712010-02-23 13:46:05 +000040#include "mips/assembler-mips-inl.h"
41#include "serialize.h"
42
43
44namespace v8 {
45namespace internal {
46
47
48
// ----------------------------------------------------------------------------
// General purpose registers, numbered per the MIPS O32 convention. Register
// is a POD aggregate wrapping the hardware register code; -1 marks "none".

const Register no_reg = { -1 };

const Register zero_reg = { 0 };  // Hard-wired to zero.
const Register at = { 1 };        // Assembler temporary.
const Register v0 = { 2 };        // Return value registers.
const Register v1 = { 3 };
const Register a0 = { 4 };        // Argument registers.
const Register a1 = { 5 };
const Register a2 = { 6 };
const Register a3 = { 7 };
const Register t0 = { 8 };        // Caller-saved temporaries.
const Register t1 = { 9 };
const Register t2 = { 10 };
const Register t3 = { 11 };
const Register t4 = { 12 };
const Register t5 = { 13 };
const Register t6 = { 14 };
const Register t7 = { 15 };
const Register s0 = { 16 };       // Callee-saved registers.
const Register s1 = { 17 };
const Register s2 = { 18 };
const Register s3 = { 19 };
const Register s4 = { 20 };
const Register s5 = { 21 };
const Register s6 = { 22 };
const Register s7 = { 23 };
const Register t8 = { 24 };       // More caller-saved temporaries.
const Register t9 = { 25 };
const Register k0 = { 26 };       // Reserved for OS kernel use.
const Register k1 = { 27 };
const Register gp = { 28 };       // Global pointer.
const Register sp = { 29 };       // Stack pointer.
const Register s8_fp = { 30 };    // Saved register / frame pointer.
const Register ra = { 31 };       // Return address.
83
84
// ----------------------------------------------------------------------------
// Coprocessor 1 (FPU) registers f0..f31; -1 marks "no register".

const FPURegister no_creg = { -1 };

const FPURegister f0 = { 0 };
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 };
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 };
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
119
// Returns the hardware register number for `reg`.
// Note the table is an identity mapping (entry i holds i); it is kept in
// table form to document the register order and mirror the other ports.
int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // s8_fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}
158
// Returns the Register value for hardware register number `num`
// (the inverse of ToNumber). `num` must be in [0, kNumRegisters).
Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    s8_fp,
    ra
  };
  return kRegisters[num];
}
177
178
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

// No reloc modes need fixing up when code moves on MIPS: target addresses
// are materialized with absolute lui/ori sequences, not pc-relative forms.
const int RelocInfo::kApplyMask = 0;

// Patch the code at the current address with the supplied instructions.
// `instruction_count` counts whole Instr words, not bytes.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
195
196
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not yet needed by the MIPS port; aborts if ever reached.
  UNIMPLEMENTED_MIPS();
}
203
204
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

// Builds an immediate Operand from a heap handle. Heap objects are recorded
// via the handle location with EMBEDDED_OBJECT relocation so the GC can
// update the pointer; non-heap values (smis) are encoded directly.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    // Store the handle's cell, not the object itself, so the object may move.
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}

// Base register + signed 16-bit displacement addressing mode.
MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
  offset_ = offset;
}
227
228
// -----------------------------------------------------------------------------
// Implementation of Assembler.

static const int kMinimalBufferSize = 4*KB;
// One retired minimal-size buffer is cached here for reuse by the next
// Assembler instead of being freed (see ~Assembler).
static byte* spare_buffer_ = NULL;

// Creates an assembler emitting into `buffer` (of `buffer_size` bytes), or,
// when buffer is NULL, into an internally owned, growable buffer.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the cached spare buffer when one is available.
      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers: instructions grow up from the start of the buffer
  // while relocation info grows down from its end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
271
272
// Releases an owned buffer: a minimal-size buffer is parked in the
// spare_buffer_ cache (if empty), anything else is freed. Externally
// provided buffers are left untouched.
Assembler::~Assembler() {
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}


// Fills `desc` with the buffer pointer, the number of instruction bytes
// emitted so far, and the size of the relocation info written so far.
void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap of the two regions.
  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
292
293
294// Labels refer to positions in the (to be) generated code.
295// There are bound, linked, and unused labels.
296//
297// Bound labels refer to known positions in the already
298// generated code. pos() is the position the label refers to.
299//
300// Linked labels refer to unknown positions in the code
301// to be generated; pos() is the position of the last
302// instruction using the label.
303
304
305// The link chain is terminated by a negative code position (must be aligned).
306const int kEndOfChain = -4;
307
308bool Assembler::is_branch(Instr instr) {
309 uint32_t opcode = ((instr & kOpcodeMask));
310 uint32_t rt_field = ((instr & kRtFieldMask));
311 uint32_t rs_field = ((instr & kRsFieldMask));
312 // Checks if the instruction is a branch.
313 return opcode == BEQ ||
314 opcode == BNE ||
315 opcode == BLEZ ||
316 opcode == BGTZ ||
317 opcode == BEQL ||
318 opcode == BNEL ||
319 opcode == BLEZL ||
320 opcode == BGTZL||
321 (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
322 rt_field == BLTZAL || rt_field == BGEZAL)) ||
323 (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
324}
325
326
// Decodes the target position stored at code position `pos`: either the
// absolute position held by an emitted label constant, or the target of a
// branch instruction (which serves as a link pointer while its label is
// still unbound).
int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch; undo the Code-header
    // bias applied by target_at_put/label_at_put.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  // Check we have a branch instruction.
  ASSERT(is_branch(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers: the immediate
  // is moved to the top 16 bits and shifted back down by 14, which both
  // sign-extends it and multiplies it by 4 (instruction words -> bytes).
  int32_t imm18 = ((instr &
                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

  return pos + kBranchPCOffset + imm18;
}


// Patches the instruction at `pos` to refer to `target_pos`: a label
// constant gets the absolute (Code-header-biased) position, a branch gets
// its 16-bit word offset field rewritten.
void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(is_branch(instr));
  // Branch offsets are relative to the delay-slot pc and must be
  // word-aligned.
  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
  ASSERT((imm18 & 3) == 0);

  instr &= ~kImm16Mask;
  int32_t imm16 = imm18 >> 2;
  ASSERT(is_int16(imm16));

  instr_at_put(pos, instr | (imm16 & kImm16Mask));
}
364
365
// Debug helper: prints the state of label L (unused / bound / linked chain)
// without modifying it.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // Walk a copy so the real label is left untouched.
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


// Binds L to code position `pos`, patching every instruction on its link
// chain to target the now-known position.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have a valid binding position.
  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    next(L);  // Call next before overwriting link with target at fixup_pos.
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


// Concatenates the link chain of `appendix` onto the end of L's chain and
// leaves `appendix` unused.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      // Walk to the last element of L's chain (terminated by kEndOfChain).
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // Appendix should not be used anymore.
}


// Binds L to the current end of the instruction stream.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


// Advances a linked label to the next element of its link chain, or marks
// it unused when the chain terminator is reached.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}
443
444
445// We have to use a temporary register for things that can be relocated even
446// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
447// space. There is no guarantee that the relocated location can be similarly
448// encoded.
449bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
450 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
451 return Serializer::enabled();
452 } else if (rmode == RelocInfo::NONE) {
453 return false;
454 }
455 return true;
456}
457
458
// Emit helpers: each GenInstr* assembles one 32-bit instruction word from
// its fields and emits it into the buffer.

// R-type instruction: opcode | rs | rt | rd | sa | function.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


// FPU R-type instruction: opcode | fmt | ft | fs | fd | function.
// NOTE: the ft field is placed with a literal shift of 16 and fd with 6;
// no named shift constants are used for those two fields here.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
      | (fd.code() << 6) | func;
  emit(instr);
}


// FPU/GPR transfer instruction: opcode | fmt | rt | fs | fd | function.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
// I-type instruction: opcode | rs | rt | 16-bit immediate.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


// I-type instruction where the rt field carries a secondary opcode
// (e.g. the REGIMM branches): opcode | rs | SF | 16-bit immediate.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


// FPU load/store instruction: opcode | rs (base) | ft | 16-bit offset.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


// Registers are in the order of the instruction encoding, from left to right.
// J-type instruction: opcode | 26-bit word-index target.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
}
539
540
// Returns the byte offset from the branch delay slot at the current pc to
// label L. While L is unbound the previous link (or kEndOfChain) is used as
// the target and L is linked to the current pc, extending the fixup chain.
// `jump_elimination_allowed` is unused on MIPS.
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link: previous element of the chain.
    } else {
      target_pos = kEndOfChain;  // First use of L: start a new chain.
    }
    L->link_to(pc_offset());
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  return offset;
}
557
558
559void Assembler::label_at_put(Label* L, int at_offset) {
560 int target_pos;
561 if (L->is_bound()) {
562 target_pos = L->pos();
563 } else {
564 if (L->is_linked()) {
565 target_pos = L->pos(); // L's link
566 } else {
567 target_pos = kEndOfChain;
568 }
569 L->link_to(at_offset);
570 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
571 }
572}
573
574
//------- Branch and jump instructions --------

// b: unconditional branch, encoded as beq zero_reg, zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


// bal: unconditional branch-and-link, encoded as bgezal zero_reg
// (always taken since zero >= 0).
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}


// beq: branch if rs == rt.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BEQ, rs, rt, offset);
}


// bgez: branch if rs >= 0 (REGIMM-encoded).
void Assembler::bgez(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
}


// bgezal: branch-and-link if rs >= 0 (REGIMM-encoded).
void Assembler::bgezal(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
}


// bgtz: branch if rs > 0.
void Assembler::bgtz(Register rs, int16_t offset) {
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
}


// blez: branch if rs <= 0.
void Assembler::blez(Register rs, int16_t offset) {
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
}


// bltz: branch if rs < 0 (REGIMM-encoded).
void Assembler::bltz(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
}


// bltzal: branch-and-link if rs < 0 (REGIMM-encoded).
void Assembler::bltzal(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
}


// bne: branch if rs != rt.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BNE, rs, rt, offset);
}


// j: absolute jump within the current 256MB region; target is a
// word-aligned byte address, encoded as a 26-bit word index.
void Assembler::j(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(J, target >> 2);
}


// jr: jump to the address in rs.
void Assembler::jr(Register rs) {
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
}


// jal: absolute jump-and-link; same target encoding as j.
void Assembler::jal(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(JAL, target >> 2);
}


// jalr: jump to the address in rs, storing the return address in rd.
void Assembler::jalr(Register rs, Register rd) {
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
}
647
648
//-------Data-processing-instructions---------

// Arithmetic.

// add: rd = rs + rt, trapping on signed overflow.
void Assembler::add(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
}


// addu: rd = rs + rt, no overflow trap.
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


// addi: rd = rs + imm, trapping on signed overflow.
// NOTE: the parameter is named rd but occupies the rt field of the
// I-type encoding (GenInstrImmediate's second register is rt).
void Assembler::addi(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDI, rs, rd, j);
}


// addiu: rd = rs + imm, no overflow trap (same rt-field note as addi).
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


// sub: rd = rs - rt, trapping on signed overflow.
void Assembler::sub(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
}


// subu: rd = rs - rt, no overflow trap.
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


// mul: rd = low 32 bits of rs * rt (SPECIAL2 encoding).
void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


// mult: HI/LO = rs * rt (signed 64-bit product).
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


// multu: HI/LO = rs * rt (unsigned 64-bit product).
void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


// div: LO = rs / rt, HI = rs % rt (signed).
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


// divu: LO = rs / rt, HI = rs % rt (unsigned).
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
706
707
// Logical.

// and: rd = rs & rt (trailing underscore avoids the C++ keyword).
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


// andi: rt = rs & zero-extended 16-bit immediate.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ANDI, rs, rt, j);
}


// or: rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


// ori: rt = rs | zero-extended 16-bit immediate.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ORI, rs, rt, j);
}


// xor: rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


// xori: rt = rs ^ zero-extended 16-bit immediate.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(XORI, rs, rt, j);
}


// nor: rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.

// sll: rd = rt << sa (immediate shift amount).
void Assembler::sll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


// sllv: rd = rt << rs (variable shift amount).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


// srl: rd = rt >> sa, logical (zero fill).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


// srlv: rd = rt >> rs, logical.
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


// sra: rd = rt >> sa, arithmetic (sign fill).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


// srav: rd = rt >> rs, arithmetic.
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
774
775
//------------Memory-instructions-------------
// All use base register + signed 16-bit offset addressing (MemOperand).

// lb: load sign-extended byte.
void Assembler::lb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}


// lbu: load zero-extended byte.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}


// lw: load 32-bit word.
void Assembler::lw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}


// sb: store byte.
void Assembler::sb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}


// sw: store 32-bit word.
void Assembler::sw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}


// lui: rd = imm << 16 (load upper immediate).
void Assembler::lui(Register rd, int32_t j) {
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
806
807
//-------------Misc-instructions--------------

// Break / Trap instructions.

// break: unconditional breakpoint with a 20-bit software code field,
// placed at bit 6 of the instruction.
void Assembler::break_(uint32_t code) {
  ASSERT((code & ~0xfffff) == 0);
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


// The conditional traps below take a 10-bit software `code` placed at
// bit 6; the condition compares rs against rt.

// tge: trap if rs >= rt (signed).
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// tgeu: trap if rs >= rt (unsigned).
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// tlt: trap if rs < rt (signed).
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// tltu: trap if rs < rt (unsigned).
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// teq: trap if rs == rt.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// tne: trap if rs != rt.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
864
865
// Move from HI/LO register.

// mfhi: rd = HI (high word of the multiply/divide result pair).
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


// mflo: rd = LO (low word of the multiply/divide result pair).
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions: rd/rt = 1 if the comparison holds, else 0.

// slt: rd = (rs < rt), signed.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


// sltu: rd = (rs < rt), unsigned.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// slti: rt = (rs < imm), signed compare with sign-extended immediate.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


// sltiu: rt = (rs < imm), unsigned compare.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
897
898
//--------Coprocessor-instructions----------------

// Load, store, move.

// lwc1: load single-precision word into FPU register fd.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


// ldc1: load doubleword into FPU register pair starting at fd.
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}


// swc1: store single-precision word from fd.
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


// sdc1: store doubleword from fd.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
}


// The GPR<->FPU moves below pass f0 only to fill the unused fd field of
// the encoding helper; it is not an operand of the instruction.

// mtc1: move word from GPR rt to FPU register fs.
void Assembler::mtc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// mthc1: move word from rt to the high half of FPU register fs.
void Assembler::mthc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


// mfc1: move word from FPU register fs to GPR rt.
void Assembler::mfc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// mfhc1: move the high half of FPU register fs to rt.
void Assembler::mfhc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
940
941
// Conversions.
// cvt_X_Y converts fs from format Y to format X into fd, where
// S = single, D = double, W = 32-bit word, L = 64-bit long.
// f0 only fills the unused ft field of the encoding helper.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
992
993
// Conditions.

// c: FPU compare of fs against ft in format `fmt`, setting condition flag
// `cc` (0..7). The 3<<4 bits select the c.cond encoding; `cond` is the
// 4-bit condition code placed in the function field.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
                  FPURegister ft, FPURegister fs, uint16_t cc) {
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// bc1f: branch if FPU condition flag `cc` is false (the 0 at bit 16
// selects the branch-on-false form).
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// bc1t: branch if FPU condition flag `cc` is true (the 1 at bit 16
// selects the branch-on-true form).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
1017
1018
// Debugging.

// Marks the current pc as a JS return site in the relocation info so the
// debugger can find and patch it.
void Assembler::RecordJSReturn() {
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


// Attaches a comment string to the current pc (debug-code builds only).
// The pointer, not a copy of the string, is recorded.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


// Remembers a source position; it is emitted lazily by
// WriteRecordedPositions.
void Assembler::RecordPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_position_ = pos;
}


// Remembers a statement-level source position; emitted lazily as well.
void Assembler::RecordStatementPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}


// Flushes any pending source positions into the relocation stream,
// suppressing duplicates of what was last written.
void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written
  // last time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}
1067
1068
// Grows the owned code buffer (doubling up to 1MB, then +1MB per step),
// copying the emitted instructions to the start and the relocation info to
// the end of the new buffer, and rebasing all internal pointers.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions live at the start of the buffer, reloc info
  // grows down from the end, hence the two different deltas.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);


  // On ia32 and ARM pc relative addressing is used, and we thus need to apply
  // a shift by pc_delta. But on MIPS the target address is directly loaded,
  // so we do not need to relocate here.

  ASSERT(!overflow());
}
1111
1112
// Appends a relocation record for the current pc to the reloc stream.
// External references are skipped unless serializing or emitting debug code.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    reloc_info_writer.Write(&rinfo);
  }
}
1133
1134
1135Address Assembler::target_address_at(Address pc) {
1136 Instr instr1 = instr_at(pc);
1137 Instr instr2 = instr_at(pc + kInstrSize);
1138 // Check we have 2 instructions generated by li.
1139 ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
1140 ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
1141 (instr2 & kOpcodeMask) == ORI ||
1142 (instr2 & kOpcodeMask) == LUI)));
1143 // Interpret these 2 instructions.
1144 if (instr1 == nopInstr) {
1145 if ((instr2 & kOpcodeMask) == ADDI) {
1146 return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
1147 } else if ((instr2 & kOpcodeMask) == ORI) {
1148 return reinterpret_cast<Address>(instr2 & kImm16Mask);
1149 } else if ((instr2 & kOpcodeMask) == LUI) {
1150 return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
1151 }
1152 } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
1153 // 32 bits value.
1154 return reinterpret_cast<Address>(
1155 (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
1156 }
1157
1158 // We should never get here.
1159 UNREACHABLE();
1160 return (Address)0x0;
1161}
1162
1163
// Patches the 2-instruction li sequence at `pc` so it loads `target` into
// the same destination register, choosing the shortest encoding (addiu,
// ori, lui, or lui+ori) and flushing the instruction cache.
void Assembler::set_target_address_at(Address pc, Address target) {
  // On MIPS we need to patch the code to generate.

  // First check we have a li.
  Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);

  // Check we have indeed the result from a li with MustUseAt true.
  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
        ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
                           (instr2 & kOpcodeMask)== ORI ||
                           (instr2 & kOpcodeMask)== LUI)));
#endif


  // Reuse the destination register field of the existing second instruction.
  uint32_t rt_code = (instr2 & kRtFieldMask);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  if (is_int16(itarget)) {
    // Fits a sign-extended 16-bit immediate:
    //   nop
    //   addiu rt zero_reg j
    *p = nopInstr;
    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
  } else if (!(itarget & HIMask)) {
    // Upper half is zero:
    //   nop
    //   ori rt zero_reg j
    *p = nopInstr;
    *(p+1) = ORI | rt_code | (itarget & LOMask);
  } else if (!(itarget & LOMask)) {
    // Lower half is zero:
    //   nop
    //   lui rt (HIMask & itarget)>>16
    *p = nopInstr;
    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
  } else {
    // Full 32-bit value:
    //   lui rt (HIMask & itarget)>>16
    //   ori rt rt, (LOMask & itarget)
    // rt_code << 5 replicates the register code into the rs field so the
    // ori reads the lui result (ori rt, rt, lo).
    *p = LUI | rt_code | ((itarget & HIMask)>>16);
    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
  }

  CPU::FlushICache(pc, 2 * sizeof(int32_t));
}
1208
1209
1210} } // namespace v8::internal
1211
Leon Clarkef7060e22010-06-03 12:02:55 +01001212#endif // V8_TARGET_ARCH_MIPS