// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.

#include "v8.h"

#include "arm/assembler-arm-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Implementation of Register and CRegister

Register no_reg = { -1 };

Register r0 = { 0 };
Register r1 = { 1 };
Register r2 = { 2 };
Register r3 = { 3 };
Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
Register r8 = { 8 };
Register r9 = { 9 };
Register r10 = { 10 };
Register fp = { 11 };
Register ip = { 12 };
Register sp = { 13 };
Register lr = { 14 };
Register pc = { 15 };


CRegister no_creg = { -1 };

CRegister cr0 = { 0 };
CRegister cr1 = { 1 };
CRegister cr2 = { 2 };
CRegister cr3 = { 3 };
CRegister cr4 = { 4 };
CRegister cr5 = { 5 };
CRegister cr6 = { 6 };
CRegister cr7 = { 7 };
CRegister cr8 = { 8 };
CRegister cr9 = { 9 };
CRegister cr10 = { 10 };
CRegister cr11 = { 11 };
CRegister cr12 = { 12 };
CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  shift_op_ = shift_op;
  rs_ = rs;
}


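// Illustrative examples (not from the original source) of the shifted-operand
// constructors above:
//   Operand(r1, LSL, 2)   // shifter operand "r1, LSL #2", i.e. r1 * 4
//   Operand(r1, ASR, r2)  // shifter operand "r1, ASR r2" (register shift)
//   Operand(r1, RRX, 0)   // rotate right with extend, stored as ROR #0
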
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}


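// Illustrative examples (not from the original source): the three MemOperand
// constructors above correspond to the ARM addressing modes
//   MemOperand(r0, 4, Offset)     // [r0, #+4]         offset addressing
//   MemOperand(r0, 4, PreIndex)   // [r0, #+4]!        pre-indexed, writeback
//   MemOperand(r0, 4, PostIndex)  // [r0], #+4         post-indexed, writeback
//   MemOperand(r0, r1, LSL, 2)    // [r0, +r1, LSL #2] scaled register offset
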
// -----------------------------------------------------------------------------
// Implementation of Assembler

// Instruction encoding bits
enum {
  H = 1 << 5,   // halfword (or byte)
  S6 = 1 << 6,  // signed (or unsigned)
  L = 1 << 20,  // load (or store)
  S = 1 << 20,  // set condition code (or leave unchanged)
  W = 1 << 21,  // writeback base register (or leave unchanged)
  A = 1 << 21,  // accumulate in multiply instruction (or not)
  B = 1 << 22,  // unsigned byte (or word)
  N = 1 << 22,  // long (or short)
  U = 1 << 23,  // positive (or negative) offset/index
  P = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I = 1 << 25,  // immediate shifter operand (or not)

  B4 = 1 << 4,
  B5 = 1 << 5,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,
  // Reserved condition
  nv = 15 << 28
};


// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;

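// Worked decoding (not from the original source) of kPopInstruction using the
// encoding bits above: al (cond = always), I (immediate shifter operand),
// 4*B21 (opcode 4 == add), sp.code()*B16 (Rn == sp), sp.code()*B12
// (Rd == sp), and 4 (immed_8 == 4, rotate_imm == 0) together spell
// "add sp, sp, #4", which is exactly how Pop() is encoded.
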
// Spare buffer kept from the last minimal-size assembler instance so a new
// instance can reuse it instead of reallocating (see the constructor and
// destructor below).
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;

Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // do our own buffer management
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // use externally provided buffer instead
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // setup buffer pointers
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}


Assembler::~Assembler() {
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  // emit constant pool if necessary
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // setup desc
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;

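// Illustrative walk-through (not from the original source): if an unbound
// label is used by branches at positions 12 and 28, then pos() == 28, the
// branch at 28 encodes position 12 in its offset field, and the branch at 12
// encodes kEndOfChain. bind_to() follows this chain via target_at() and
// patches each branch with the real target using target_at_put().
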
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}


void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}

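// Worked example (not from the original source), with kPcLoadDelta == 8:
// binding a branch at position 0 to target position 100 stores
// imm26 = 100 - (0 + 8) = 92 and imm24 = 92 >> 2 = 23 into the instruction;
// target_at() then recovers 0 + 8 + (23 << 2) == 100.
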

void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0) {
            b = "bl";
          } else {
            b = "b";
          }
          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) {
    last_bound_pos_ = pos;
  }
}


void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // append appendix to L's list
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


// Low-level code emission routines depending on the addressing mode
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned
  for (int rot = 0; rot < 16; rot++) {
    // Rotation by 0 bits is handled separately: shifting a 32-bit value
    // right by 32 is undefined behavior in C++.
    uint32_t imm8 = (rot == 0)
        ? imm32
        : ((imm32 << 2*rot) | (imm32 >> (32 - 2*rot)));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
      *instr ^= 0x2*B21;
      return true;
    }
  }
  return false;
}

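// Worked example (not from the original source): 0x3FC00 fits the shifter
// since rotating it left by 22 bits (rot == 11) yields 0xFF, so it encodes as
// rotate_imm == 11, immed_8 == 0xFF. By contrast 0x1FF spans nine bits, fits
// no 8-bit window under any even rotation, and makes fits_shifter() return
// false, forcing the caller to load the value via ip.
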

// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    return false;
  }
  return true;
}


void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // immediate shift
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // register shift
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // block constant pool emission for one instruction after reading pc
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
}

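// Illustrative example (not from the original source): for
//   mov(r0, Operand(0x12345678))
// the constant cannot be encoded as a shifter operand, so addrmod1() records
// a reloc entry and emits "ldr r0, [pc, #0]"; the constant itself is emitted
// into the next constant pool and CheckConstPool() patches the ldr offset to
// reach it.
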

void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // immediate offset cannot be encoded, load it first to register ip;
      // rn (and rd in a load) should never be ip, or they will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // immediate offset cannot be encoded, load it first to register ip;
      // rn (and rd in a load) should never be ip, or they will be trashed
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // scaled register offset not supported, load index first;
    // rn (and rd in a load) should never be ip, or they will be trashed
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // register offset
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // unindexed addressing is not encoded by this function
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // post-indexed addressing requires W == 1; different from addrmod2/3
  if ((am & P) == 0) {
    am |= W;
  }

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}


int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}


// Branch instructions
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al) {
    // dead code is a good location to emit the constant pool
    CheckConstPool(false, false);
  }
}

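// Usage sketch (not from the original source; assumes the Label convenience
// overloads of b() declared in assembler-arm.h, which forward to
// branch_offset(), and a hypothetical Assembler instance named asm_):
//
//   Label loop;
//   asm_.bind(&loop);
//   asm_.sub(r0, r0, Operand(1), SetCC);
//   asm_.b(&loop, ne);  // loop while r0 != 0
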

void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
}


void Assembler::blx(Register target, Condition cond) {  // v5 and above
  WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}


void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}


// Data-processing instructions
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 1*B21 | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 2*B21 | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 3*B21 | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // pattern
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 5*B21 | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 6*B21 | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 7*B21 | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 8*B21 | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 9*B21 | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 10*B21 | S, src1, r0, src2);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 11*B21 | S, src1, r0, src2);
}


void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 12*B21 | s, src1, dst, src2);
}


void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}


void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}


// Multiply instructions
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


// Miscellaneous arithmetic instructions
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}


// Status register access instructions
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}


void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // immediate
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // immediate operand cannot be encoded, load it first to register ip
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}


// Load/Store instructions
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // pattern
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
    }
  }
}


void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //   add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  //   -> str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}


void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}


void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}


void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}


void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}


void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}


void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}


// Load/Store multiple instructions
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}; base != sp is not restartable
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // emit the constant pool after a function return implemented by ldm ..{..pc}
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}


void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}

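// Illustrative example (not from the original source): prologue/epilogue
// style register saves map onto these instructions as
//   stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());  // stmdb sp!, {r4, r5, lr}
//   ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());  // ldmia sp!, {r4, r5, pc}
// The ldm form returns by loading pc and, being unconditional, also triggers
// the constant pool check in ldm() above.
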

// Semaphore instructions
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}


void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}


// Exception-generating instructions and debugging support
void Assembler::stop(const char* msg) {
#if !defined(__arm__)
  // The simulator handles these special instructions and stops execution.
  emit(15 << 28 | reinterpret_cast<intptr_t>(msg));
#else
  // Just issue a simple break instruction for now. Alternatively we could use
  // the swi(0x9f0001) instruction on Linux.
  bkpt(0);
#endif
}


void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
}


void Assembler::swi(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}


// Coprocessor instructions
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}


void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
}


void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}


void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // unindexed addressing
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}


void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
}


void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}


void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}


void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // unindexed addressing
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}


void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& dst,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
}


void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}


// Pseudo instructions
void Assembler::lea(Register dst,
                    const MemOperand& x,
                    SBit s,
                    Condition cond) {
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // immediate offset
    if ((am & P) == 0) {  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    } else if ((am & U) == 0) {  // negative indexing
      sub(dst, x.rn_, Operand(x.offset_), s, cond);
    } else {
      add(dst, x.rn_, Operand(x.offset_), s, cond);
    }
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    ASSERT(!x.rm_.is(pc));
    if ((am & P) == 0) {  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    } else if ((am & U) == 0) {  // negative indexing
      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
    } else {
      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
    }
  }
}

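// Illustrative examples (not from the original source):
//   lea(r0, MemOperand(sp, 16, Offset))     // emits add r0, sp, #16
//   lea(r0, MemOperand(sp, 16, NegOffset))  // emits sub r0, sp, #16
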

// Debugging
void Assembler::RecordJSReturn() {
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_position_ = pos;
}


void Assembler::RecordStatementPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}


void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written last
  // time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // compute new buffer size
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // setup new buffer
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // copy the data
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // switch buffers
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // relocate pending relocation entries
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}

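// Illustrative growth schedule (not from the original source): a buffer that
// starts at the 4KB minimum grows 4KB -> 8KB -> ... -> 1MB by doubling, then
// by 1MB per step, which bounds both the number of copies and the overshoot.
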

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // these modes do not need an entry in the constant pool
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // no_const_pool_before_, which is checked here. Also, recursive calls to
  // CheckConstPool are blocked by no_const_pool_before_.
  if (pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    next_buffer_check_ = no_const_pool_before_;

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker: an "undefined instruction" as specified
  // by A3.1 Instruction set encoding.
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}

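// Illustrative pool layout (not from the original source): with two pending
// entries and require_jump set, CheckConstPool() above emits
//   b after_pool
//   <marker 0x03000002>   (undefined instruction; low bits hold the count)
//   <constant word 0>
//   <constant word 1>
//  after_pool:
// and back-patches each pending "ldr rX, [pc, #0]" to address its constant.
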

} }  // namespace v8::internal