// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.

#include "v8.h"

#include "assembler-arm-inl.h"
#include "serialize.h"

namespace v8 { namespace internal {

DEFINE_bool(debug_code, false,
            "generate extra code (comments, assertions) for debugging");


// -----------------------------------------------------------------------------
// Implementation of Register and CRegister

Register no_reg = { -1 };

Register r0  = {  0 };
Register r1  = {  1 };
Register r2  = {  2 };
Register r3  = {  3 };
Register r4  = {  4 };
Register r5  = {  5 };
Register r6  = {  6 };
Register r7  = {  7 };
Register r8  = {  8 };
Register r9  = {  9 };
Register r10 = { 10 };
Register fp  = { 11 };
Register ip  = { 12 };
Register sp  = { 13 };
Register lr  = { 14 };
Register pc  = { 15 };


CRegister no_creg = { -1 };

CRegister cr0  = {  0 };
CRegister cr1  = {  1 };
CRegister cr2  = {  2 };
CRegister cr3  = {  3 };
CRegister cr4  = {  4 };
CRegister cr5  = {  5 };
CRegister cr6  = {  6 };
CRegister cr7  = {  7 };
CRegister cr8  = {  8 };
CRegister cr9  = {  9 };
CRegister cr10 = { 10 };
CRegister cr11 = { 11 };
CRegister cr12 = { 12 };
CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };


// In order to determine the pc store offset, we execute a small code sequence.
// See ARM Architecture Reference Manual section A-2.4.3
// Note that 'str pc, [sp]' and 'stmia sp, {pc}' were using different offsets
// under the QEMU emulator (now fixed), so we are careful to test the actual
// instruction we are interested in (stmia).
int PcStoreOffset() {
#if !defined(__arm__)
  // Building an ARM emulator based target. The emulator is wired for 8 byte
  // pc offsets as is the default in the spec.
  static int pc_store_offset = 8;
#elif defined(__arm__) && !defined(__thumb__)
  // __arm__ may be defined in thumb mode.
  static int pc_store_offset = -1;
  asm volatile(
    "sub sp, sp, #4  \n\t"
    "sub r1, pc, #4  \n\t"
    "stmia sp, {pc}  \n\t"
    "ldr r0, [sp]    \n\t"
    "add sp, sp, #4  \n\t"
    "sub %0, r0, r1  \n\t"
    : "=r" (pc_store_offset) : : "r0", "r1", "memory");
#elif defined(__thumb__)
  static int pc_store_offset = -1;
  asm volatile(
    "@ Enter ARM Mode   \n\t"
    "adr r2, 1f         \n\t"
    "bx r2              \n\t"
    ".ALIGN 4           \n\t"
    ".ARM               \n"
    "1: sub sp, sp, #4  \n\t"
    "sub r1, pc, #4     \n\t"
    "stmia sp, {pc}     \n\t"
    "ldr r0, [sp]       \n\t"
    "add sp, sp, #4     \n\t"
    "sub %0, r0, r1     \n"
    "@ Enter THUMB Mode \n\t"
    "adr r2, 2f+1       \n\t"
    "bx r2              \n\t"
    ".THUMB             \n"
    "2:                 \n\t"
    : "=r" (pc_store_offset) : : "r0", "r1", "r2", "memory");
#else
#error unsupported architecture
#endif
  ASSERT(pc_store_offset == 8 || pc_store_offset == 12);
  return pc_store_offset;
}
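
// In practice the probe yields 8 on most implementations; the architecture
// also permits storing pc + 12 (hence the ASSERT above accepting both),
// which is why the offset is probed at runtime instead of being hard-coded.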


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


void RelocInfo::patch_code(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  UNIMPLEMENTED();
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::patch_code_with_call(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = embedded_object;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = no_reloc;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
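
// For example, MemOperand(sp, 4, NegPreIndex) describes the operand of
// 'str r0, [sp, #-4]!' (a push of r0) and MemOperand(sp, 4, PostIndex)
// describes 'ldr r0, [sp], #4' (a pop into r0); see the push/pop patterns
// below.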


// -----------------------------------------------------------------------------
// Implementation of Assembler

// Instruction encoding bits
enum {
  H   = 1 << 5,   // halfword (or byte)
  S6  = 1 << 6,   // signed (or unsigned)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  B4  = 1 << 4,
  B5  = 1 << 5,
  B7  = 1 << 7,
  B8  = 1 << 8,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks
  RdMask     = 15 << 12,  // in str instruction
  CondMask   = 15 << 28,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask  = (1 << 24) - 1,
  Off12Mask  = (1 << 12) - 1,
  // Reserved condition
  nv = 15 << 28
};
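
// For illustration: with the bits above, the unconditional instruction
// 'add r0, r1, #1' (not setting the condition code) is composed as
//   al | 4*B21 | I | r1.code()*B16 | r0.code()*B12 | 1  ==  0xE2810001
// i.e. condition in bits 28..31, opcode in bits 21..24, the I bit selecting
// an immediate shifter operand, Rn in bits 16..19 and Rd in bits 12..15.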


DEFINE_bool(push_pop_elimination, true,
            "eliminate redundant push/pops in assembly code");
DEFINE_bool(print_push_pop_elimination, false,
            "print elimination of redundant push/pops in assembly code");

// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;

// spare_buffer_
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;

Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // do our own buffer management
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // use externally provided buffer instead
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // setup buffer pointers
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  last_position_ = kNoPosition;
  last_position_is_statement_ = false;
}


Assembler::~Assembler() {
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  // finalize code
  if (unbound_label_.is_linked())
    bind_to(&unbound_label_, binding_pos_);

  // emit constant pool if necessary
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // setup desc
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
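
// To illustrate the chain: if an unbound label is referenced by branches at
// code positions 12 and 28, L->pos() is 28, target_at(28) yields 12, and
// target_at(12) yields kEndOfChain. Binding the label walks this chain and
// patches each branch with the real target via target_at_put.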


int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0)
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;

  return pos + 8 + imm26;
}
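
// The shift pair '<< 8 >> 6' above sign-extends the 24-bit field and
// multiplies it by 4 in one step: e.g. an imm24 field of 0xfffffe (-2)
// yields imm26 == -8, so target_at returns pos + 8 - 8 == pos, a branch
// to itself.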


void Assembler::target_at_put(int pos, int target_pos) {
  int imm26 = target_pos - pos - 8;
  Instr instr = instr_at(pos);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
      int cond = instr & CondMask;
      const char* b;
      const char* c;
      if (cond == nv) {
        b = "blx";
        c = "";
      } else {
        if ((instr & B24) != 0)
          b = "bl";
        else
          b = "b";

        switch (cond) {
          case eq: c = "eq"; break;
          case ne: c = "ne"; break;
          case hs: c = "hs"; break;
          case lo: c = "lo"; break;
          case mi: c = "mi"; break;
          case pl: c = "pl"; break;
          case vs: c = "vs"; break;
          case vc: c = "vc"; break;
          case hi: c = "hi"; break;
          case ls: c = "ls"; break;
          case ge: c = "ge"; break;
          case lt: c = "lt"; break;
          case gt: c = "gt"; break;
          case le: c = "le"; break;
          case al: c = ""; break;
          default:
            c = "";
            UNREACHABLE();
        }
      }
      PrintF("%s%s\n", b, c);
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


DEFINE_bool(eliminate_jumps, true, "eliminate jumps to jumps in assembly code");
DEFINE_bool(print_jump_elimination, false,
            "print elimination of jumps to jumps in assembly code");

void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // do not eliminate jump instructions before the last bound position
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // append appendix to L's list
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  if (FLAG_eliminate_jumps) {
    // Resolve unbound label.
    if (unbound_label_.is_linked()) {
      // Unbound label exists => link it with L if same binding
      // position, otherwise fix it.
      if (binding_pos_ == pc_offset()) {
        // Link it to L's list.
        link_to(L, &unbound_label_);
      } else {
        // Otherwise bind unbound label.
        ASSERT(binding_pos_ < pc_offset());
        bind_to(&unbound_label_, binding_pos_);
      }
    }
    ASSERT(!unbound_label_.is_linked());
    // Try to eliminate jumps to next instruction.
    Instr instr;
    // Do not remove an already bound jump target.
    while (last_bound_pos_ < pc_offset() &&
           reloc_info_writer.last_pc() <= pc_ - kInstrSize &&
           L->is_linked() && L->pos() == pc_offset() - kInstrSize &&
           (((instr = instr_at(L->pos())) & CondMask) != nv &&  // not blx
            (instr & 15*B24) == 10*B24)) {  // b<cond>, but not bl<cond>
      // Previous instruction is b<cond> jumping immediately after it
      // => eliminate it
      if (FLAG_print_jump_elimination)
        PrintF("@ %d jump to next eliminated\n", L->pos());
      // Remove first entry from label list.
      next(L);
      // Eliminate instruction (set code pointers back).
      pc_ -= kInstrSize;
      // Make sure not to skip relocation information when rewinding.
      ASSERT(reloc_info_writer.last_pc() <= pc_);
    }
    // delay fixup of L => store it as unbound label
    unbound_label_ = *L;
    binding_pos_ = pc_offset();
    L->Unuse();
  }
  bind_to(L, pc_offset());
}
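
// Typical use during code generation (illustrative):
//   Label target;
//   b(&target);      // forward branch: links 'target' into the chain
//   ...
//   bind(&target);   // patches all linked branches to branch here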


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


// Low-level code emission routines depending on the addressing mode
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
      *instr ^= 0x2*B21;
      return true;
    }
  }
  return false;
}
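
// For example, imm32 == 0x3fc is too wide for 8 bits directly, but with
// rot == 15 the rotated value (0x3fc << 30) | (0x3fc >> 2) == 0xff fits,
// so it is encoded as immed_8 == 0xff, rotate_imm == 15: the CPU rotates
// 0xff right by 2*15 bits to recover 0x3fc.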


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if ((x.rmode_ != no_reloc && x.rmode_ != external_reference) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'
      RecordRelocInfo(x.rmode_, x.imm32_);
      ASSERT(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // immediate shift
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // register shift
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // block constant pool emission for one instruction after reading pc
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}
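
// For example, add(r0, r1, Operand(0x12345678)) cannot encode its constant
// as a shifter operand, so a constant pool entry is recorded and
// 'ldr ip, [pc, #0]' (patched when the pool is emitted) followed by
// 'add r0, r1, ip' is generated, while mov(r0, Operand(0x12345678))
// collapses to a single 'ldr r0, [pc, #0]'.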


void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // register offset
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // unindexed addressing is not encoded by this function
  ASSERT((instr & ~(CondMask | P | U | N | W | L)) == (B27 | B26));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // post-indexed addressing requires W == 1; this differs from addrmod2/3
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  // if we emit an unconditional jump/call and if the current position is the
  // target of the unbound label, we can change the binding position of the
  // unbound label, thereby eliminating an unnecessary jump
  bool can_eliminate = false;
  if (jump_elimination_allowed && FLAG_eliminate_jumps &&
      unbound_label_.is_linked() && binding_pos_ == pc_offset()) {
    can_eliminate = true;
    if (FLAG_print_jump_elimination) {
      PrintF("eliminated jumps/calls to %d from ", binding_pos_);
      print(&unbound_label_);
    }
  }
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    if (can_eliminate)
      binding_pos_ = target_pos;
  } else {
    if (can_eliminate)
      link_to(L, &unbound_label_);  // may modify L's link
    if (L->is_linked())
      target_pos = L->pos();  // L's link
    else
      target_pos = kEndOfChain;
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label
  BlockConstPoolBefore(pc_offset() + kInstrSize);

  return target_pos - pc_offset() - 8;
}
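
// The trailing '- 8' accounts for the ARM pipeline: when a branch executes,
// pc reads two instructions (8 bytes) past the branch, so a branch to the
// immediately following instruction has offset -4 and a branch to itself
// has offset -8.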


// Branch instructions
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al)
    // dead code is a good location to emit the constant pool
    CheckConstPool(false, false);
}


void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
}


void Assembler::blx(Register target, Condition cond) {  // v5 and above
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}


void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}


// Data-processing instructions
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 1*B21 | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 2*B21 | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 3*B21 | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // pattern
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 5*B21 | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 6*B21 | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 7*B21 | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 8*B21 | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 9*B21 | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 10*B21 | S, src1, r0, src2);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 11*B21 | S, src1, r0, src2);
}


void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 12*B21 | s, src1, dst, src2);
}


void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}


void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}


// Multiply instructions
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  ASSERT(!dst.is(src1));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dst.is(src1));
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// Miscellaneous arithmetic instructions
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}


// Status register access instructions
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}


void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if ((src.rmode_ != no_reloc && src.rmode_ != external_reference) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // immediate operand cannot be encoded, load it first to register ip
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}


// Load/Store instructions
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // pattern
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
    }
  }
}


void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //   add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  //   -> str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}


void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}


void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}


void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}


void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}


void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}


void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}


// Load/Store multiple instructions
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // emit the constant pool after a function return implemented by ldm ..{..pc}
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
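
// For illustration, the call case mentioned above is the classic sequence
//   mov lr, pc
//   ldm ..., {..., pc}
// where 'mov lr, pc' (pc reads 8 bytes ahead) sets the return address to the
// instruction right after the ldm; a pool emitted directly behind the ldm
// would be returned into, so a jump over the pool is forced instead.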


void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}


// Semaphore instructions
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}


void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}


// Exception-generating instructions and debugging support
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
  // The simulator handles these special instructions and stops execution.
  emit(15 << 28 | ((intptr_t) msg));
#else
  // Just issue a simple break instruction for now. Alternatively we could use
  // the swi(0x9f0001) instruction on Linux.
  bkpt(0);
#endif
}


void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
}


void Assembler::swi(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}


// Coprocessor instructions
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}


void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
}


void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}


void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // unindexed addressing
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}


void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
}


void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}


void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}


void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // unindexed addressing
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}


void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& dst,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
}


void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}


// Pseudo instructions
void Assembler::lea(Register dst,
                    const MemOperand& x,
                    SBit s,
                    Condition cond) {
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    if ((am & P) == 0)  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    else if ((am & U) == 0)  // negative indexing
      sub(dst, x.rn_, Operand(x.offset_), s, cond);
    else
      add(dst, x.rn_, Operand(x.offset_), s, cond);
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    ASSERT(!x.rm_.is(pc));
    if ((am & P) == 0)  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    else if ((am & U) == 0)  // negative indexing
      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
    else
      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
  }
}


// Debugging
void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(comment, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordPosition(int pos) {
  if (pos == kNoPosition) return;
  ASSERT(pos >= 0);
  if (pos == last_position_) return;
  CheckBuffer();
  RecordRelocInfo(position, pos);
  last_position_ = pos;
  last_position_is_statement_ = false;
}


void Assembler::RecordStatementPosition(int pos) {
  if (pos == last_position_) return;
  CheckBuffer();
  RecordRelocInfo(statement_position, pos);
  last_position_ = pos;
  last_position_is_statement_ = true;
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // compute new buffer size
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // setup new buffer
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // copy the data
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // switch buffers
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // none of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries

  // relocate pending relocation entries
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != comment && rinfo.rmode() != position);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
}


void Assembler::RecordRelocInfo(RelocMode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= comment && rmode <= statement_position) {
    // adjust code for new modes
    ASSERT(is_comment(rmode) || is_position(rmode));
    // these modes do not need an entry in the constant pool
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != no_reloc) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == external_reference &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // no_const_pool_before_, which is checked here. Also, recursive calls to
  // CheckConstPool are blocked by no_const_pool_before_.
  if (pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as possible
    next_buffer_check_ = no_const_pool_before_;

    // Something is wrong if emission is forced and blocked at the same time
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker
  // "Undefined instruction" as specified by A3.1 Instruction set encoding
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != comment && rinfo.rmode() != position &&
           rinfo.rmode() != statement_position);
    Instr instr = instr_at(rinfo.pc());
    // Instruction to patch must be a ldr/str [pc, #offset]
    // P and U set, B and W clear, Rn == pc, offset12 still 0
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
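
// Sketch of the result (illustrative): for a single pending entry recorded
// by mov(r0, Operand(0x12345678)), the earlier 'ldr r0, [pc, #0]' has its
// offset patched by the loop above, and the pool itself is emitted as
//   b after_pool        ; only if require_jump
//   <0x03000001>        ; marker: undefined instruction, count == 1
//   .word 0x12345678    ; the pool entry, addressed pc-relative by the ldr
// after_pool: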


} }  // namespace v8::internal