blob: f1f59ced7f3cbaf7b7bcddfb153fc5be7495fcb1 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
Leon Clarked91b9f72010-01-27 17:25:45 +000033// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
35// Copyright 2010 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +000036
37#include "v8.h"
38
39#include "arm/assembler-arm-inl.h"
40#include "serialize.h"
41
42namespace v8 {
43namespace internal {
44
// Safe default is no features.
// Static storage for the CPU-feature bitsets declared on CpuFeatures:
// supported_ holds every feature usable for code generation, enabled_ the
// features currently switched on by a Scope, and found_by_runtime_probing_
// those discovered dynamically (and therefore unsafe for snapshots).
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
49
Andrei Popescu402d9372010-02-26 13:31:12 +000050
#ifdef __arm__
// Returns the bitset of CPU features that the compiler's target flags
// already assume. Anything implied here can be used unconditionally,
// even in snapshot code, because the whole binary requires it.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
  answer |= 1u << VFP3;
#endif  // def CAN_USE_VFP_INSTRUCTIONS
  return answer;
}
#endif  // def __arm__
69
70
// Populates supported_ (and found_by_runtime_probing_) with the features
// available on this host. On the simulator build the flags decide; on real
// ARM hardware, compile-time/platform-implied features are used when
// serializing, and OS-level runtime probing otherwise.
void CpuFeatures::Probe() {
#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  if (Serializer::enabled()) {
    // Only features guaranteed by the platform/compiler may go into a
    // snapshot; runtime-probed features would not be portable.
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    supported_ |= CpuFeaturesImpliedByCompiler();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif
}
101
102
Steve Blocka7e24c12009-10-30 11:49:00 +0000103// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +0000104// Implementation of RelocInfo
105
// No relocation modes need to be applied on ARM when the code object moves.
const int RelocInfo::kApplyMask = 0;
107
108
// Overwrites instruction_count instructions at this reloc-info's pc with the
// supplied instruction words, then flushes the instruction cache so the CPU
// observes the new code.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
120
121
122// Patch the code at the current PC with a call to the target address.
123// Additional guard instructions can be added if required.
// Not supported on ARM; deliberately aborts if reached.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
128
129
130// -----------------------------------------------------------------------------
131// Implementation of Operand and MemOperand
132// See assembler-arm-inl.h for inlined constructors
133
// Builds an immediate operand from a handle. Heap objects are referenced
// through the handle location (so the GC can relocate them via the
// EMBEDDED_OBJECT reloc entry); smis are embedded directly with no reloc.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
148
149
// Register operand shifted by an immediate amount. RRX is accepted as a
// pseudo shift-op and stored as its hardware encoding (ROR with amount 0).
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}
164
165
166Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
167 ASSERT(shift_op != RRX);
168 rm_ = rm;
169 rs_ = no_reg;
170 shift_op_ = shift_op;
171 rs_ = rs;
172}
173
174
// [rn +/- offset] addressing; rm_ == no_reg marks the immediate-offset form.
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}
181
// [rn +/- rm] addressing with no scaling (shift fields zeroed so the
// encoders can treat this uniformly with the scaled form).
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}
189
190
// [rn +/- rm <shift_op> #shift_imm] scaled-register addressing.
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
200
201
202// -----------------------------------------------------------------------------
Andrei Popescu31002712010-02-23 13:46:05 +0000203// Implementation of Assembler.
Steve Blocka7e24c12009-10-30 11:49:00 +0000204
Andrei Popescu31002712010-02-23 13:46:05 +0000205// Instruction encoding bits.
// Named bits and masks of the 32-bit ARM instruction word, used by the
// encoders below. Note several names alias the same bit (e.g. L/S at
// bit 20) because the bit's meaning depends on the instruction class.
enum {
  H = 1 << 5,   // halfword (or byte)
  S6 = 1 << 6,  // signed (or unsigned)
  L = 1 << 20,  // load (or store)
  S = 1 << 20,  // set condition code (or leave unchanged)
  W = 1 << 21,  // writeback base register (or leave unchanged)
  A = 1 << 21,  // accumulate in multiply instruction (or not)
  B = 1 << 22,  // unsigned byte (or word)
  N = 1 << 22,  // long (or short)
  U = 1 << 23,  // positive (or negative) offset/index
  P = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I = 1 << 25,  // immediate shifter operand (or not)

  // Generic single-bit names for building encodings by position.
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,
  // Reserved condition.
  nv = 15 << 28
};
248
249
// Pre-encoded instruction words and match patterns used by the peephole
// optimizer and by code patching (see e.g. Assembler::add below).

// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;

// Spare buffer.
// One minimal-size buffer is cached here after an Assembler is destroyed,
// so short-lived assemblers can avoid a fresh allocation.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
275
// Creates an assembler writing into `buffer` of `buffer_size` bytes.
// Passing buffer == NULL makes the assembler allocate (or reuse the cached
// spare) and own its buffer; otherwise the caller retains ownership.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the cached spare buffer if one is available.
      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers. Instructions grow up from the start of the
  // buffer, relocation info grows down from the end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
318
319
// Releases the buffer if owned; a minimal-size buffer is parked in
// spare_buffer_ for reuse instead of being freed.
Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}
330
331
// Finalizes assembly (forcing out any pending constant pool) and fills in
// the code descriptor with buffer, instruction size and reloc-info size.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written backwards from the end of the buffer.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
343
344
345void Assembler::Align(int m) {
346 ASSERT(m >= 4 && IsPowerOf2(m));
347 while ((pc_offset() & (m - 1)) != 0) {
348 nop();
349 }
350}
351
352
Steve Block6ded16b2010-05-10 14:33:55 +0100353bool Assembler::IsNop(Instr instr, int type) {
354 // Check for mov rx, rx.
355 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
356 return instr == (al | 13*B21 | type*B12 | type);
357}
358
359
360bool Assembler::IsBranch(Instr instr) {
361 return (instr & (B27 | B25)) == (B27 | B25);
362}
363
364
// Returns the byte offset encoded in a branch instruction's imm24 field.
// The paired shifts sign-extend the 24-bit field and multiply by 4 in one
// step: << 8 moves the sign bit to bit 31, the arithmetic >> 6 brings the
// value back down scaled by 4.
int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes.
  return ((instr & Imm24Mask) << 8) >> 6;
}
371
372
373bool Assembler::IsLdrRegisterImmediate(Instr instr) {
374 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
375}
376
377
378int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
379 ASSERT(IsLdrRegisterImmediate(instr));
380 bool positive = (instr & B23) == B23;
381 int offset = instr & Off12Mask; // Zero extended offset.
382 return positive ? offset : -offset;
383}
384
385
386Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
387 ASSERT(IsLdrRegisterImmediate(instr));
388 bool positive = offset >= 0;
389 if (!positive) offset = -offset;
390 ASSERT(is_uint12(offset));
391 // Set bit indicating whether the offset should be added.
392 instr = (instr & ~B23) | (positive ? B23 : 0);
393 // Set the actual offset.
394 return (instr & ~Off12Mask) | offset;
395}
396
397
Steve Blocka7e24c12009-10-30 11:49:00 +0000398// Labels refer to positions in the (to be) generated code.
399// There are bound, linked, and unused labels.
400//
401// Bound labels refer to known positions in the already
402// generated code. pos() is the position the label refers to.
403//
404// Linked labels refer to unknown positions in the code
405// to be generated; pos() is the position of the last
406// instruction using the label.
407
408
// The link chain is terminated by a negative code position (must be aligned)
// -4 is used (rather than -1) so the sentinel stays 4-byte aligned like
// every real code position.
const int kEndOfChain = -4;
411
412
// Returns the code position a link at `pos` points to: either the target
// stored in an emitted label constant, or the destination of a b/bl/blx
// branch. Used to walk a label's link chain.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
427
428
// Patches the link at `pos` so it refers to `target_pos`: either rewrites
// an emitted label constant, or re-encodes the imm24 (and, for blx, the
// H bit) of a branch instruction.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}
452
453
// Debug helper: prints a label's state. For a linked label it walks the
// whole link chain, decoding each link site as either an emitted label
// constant ("value") or a conditional branch mnemonic.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // Copy so the caller's label is not consumed by next().
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
510
511
// Binds label L to code position pos, patching every instruction on L's
// link chain to point at the now-known target.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
526
527
// Merges the link chain of `appendix` into L's chain (appending it at the
// end) and marks appendix unused afterwards.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      // Walk to the end of L's chain (terminated by kEndOfChain) and
      // splice appendix's head in there.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
547
548
// Binds L to the current end of the instruction stream.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
553
554
// Advances L one step along its link chain; when the kEndOfChain sentinel
// is reached the label becomes unused.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}
565
566
Andrei Popescu31002712010-02-23 13:46:05 +0000567// Low-level code emission routines depending on the addressing mode.
Steve Blocka7e24c12009-10-30 11:49:00 +0000568static bool fits_shifter(uint32_t imm32,
569 uint32_t* rotate_imm,
570 uint32_t* immed_8,
571 Instr* instr) {
Andrei Popescu31002712010-02-23 13:46:05 +0000572 // imm32 must be unsigned.
Steve Blocka7e24c12009-10-30 11:49:00 +0000573 for (int rot = 0; rot < 16; rot++) {
574 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
575 if ((imm8 <= 0xff)) {
576 *rotate_imm = rot;
577 *immed_8 = imm8;
578 return true;
579 }
580 }
Andrei Popescu31002712010-02-23 13:46:05 +0000581 // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
Steve Blocka7e24c12009-10-30 11:49:00 +0000582 if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
583 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
584 *instr ^= 0x2*B21;
585 return true;
586 }
587 }
588 return false;
589}
590
591
592// We have to use the temporary register for things that can be relocated even
593// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
594// space. There is no guarantee that the relocated location can be similarly
595// encoded.
// Returns true when an operand with reloc mode rmode must be loaded via
// the ip scratch register (i.e. go through the constant pool) rather than
// be encoded inline, because the value may be relocated later.
static bool MustUseIp(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    // External references are only non-inlinable when serializing.
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    return false;
  }
  return true;
}
609
610
// Emits a data-processing instruction (addressing mode 1), choosing the
// shifter-operand encoding for x: inline immediate, immediate-shifted
// register, or register-shifted register. Immediates that cannot be
// encoded (or that may be relocated) are routed through ip / a pc-relative
// load instead.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}
652
653
// Emits a word/byte load-store (addressing mode 2). Immediate offsets that
// do not fit in 12 bits are materialized in ip and re-dispatched as a
// register-offset form.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;  // Flip to the subtract-offset form.
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
685
686
// Emits a halfword/signed-byte load-store (addressing mode 3). The 8-bit
// immediate is split into two nibbles in the encoding; unencodable
// immediates and scaled-register offsets fall back to ip.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;  // Flip to the subtract-offset form.
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // High nibble goes in bits 11-8, low nibble in bits 3-0.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
725
726
// Emits a load/store-multiple instruction (addressing mode 4) with the
// given base register and register list.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
733
734
// Emits a coprocessor load/store (addressing mode 5). The offset is a
// word-aligned byte offset, stored as an 8-bit word count.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;  // Flip to the subtract-offset form.
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
758
759
// Computes the pc-relative branch offset to label L from the next emitted
// instruction. For an unbound label, links the current position into L's
// chain so it can be patched when the label is bound.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
778
779
// Stores a label reference as a data word at at_offset. For an unbound
// label the word temporarily holds the previous link in the label's chain
// (biased by the Code header) until the label is bound.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}
794
795
Andrei Popescu31002712010-02-23 13:46:05 +0000796// Branch instructions.
// Emits a conditional branch with the given byte offset (must be
// word-aligned and representable in 24 bits after scaling).
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}
808
809
// Emits a conditional branch-with-link (bl) with the given byte offset.
void Assembler::bl(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}
816
817
// Emits blx with an immediate offset (ARMv5+). The offset may be
// halfword-aligned; its bit 1 is carried in the H bit (bit 24).
void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
}
826
827
// Emits blx to a target register (ARMv5+).
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}
833
834
// Emits bx to a target register (ARMv5+, plus v4t).
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}
840
841
Andrei Popescu31002712010-02-23 13:46:05 +0000842// Data-processing instructions.
843
844// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
845// Instruction details available in ARM DDI 0406A, A8-464.
846// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
847// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
// Emits ubfx (unsigned bit-field extract). src2 is the lsb position and
// src3 the field width minus one; both must be plain immediates in 0..31.
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
                     const Operand& src3, Condition cond) {
  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}
856
857
// and: dst = src1 & src2 (data-processing opcode 0).
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
}
862
863
// dst = src1 XOR src2 (data-processing opcode 1).
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 1*B21 | s, src1, dst, src2);
}
868
869
// dst = src1 - src2 (data-processing opcode 2).
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 2*B21 | s, src1, dst, src2);
}
874
875
// Reverse subtract: dst = src2 - src1 (data-processing opcode 3).
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 3*B21 | s, src1, dst, src2);
}
880
881
// dst = src1 + src2 (data-processing opcode 4), followed by a peephole
// optimization that deletes a push immediately undone by a pop of sp.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  // The guards ensure no label was bound and no reloc info was recorded
  // inside the two-instruction window, so rewinding pc_ is safe.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
903
904
// Add with carry: dst = src1 + src2 + C (data-processing opcode 5).
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 5*B21 | s, src1, dst, src2);
}
909
910
// Subtract with carry: dst = src1 - src2 - NOT(C) (opcode 6).
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 6*B21 | s, src1, dst, src2);
}
915
916
// Reverse subtract with carry: dst = src2 - src1 - NOT(C) (opcode 7).
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 7*B21 | s, src1, dst, src2);
}
921
922
// Test bits: sets flags from src1 AND src2 (opcode 8); no destination,
// so the S bit is forced and r0 fills the unused Rd slot.
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 8*B21 | S, src1, r0, src2);
}
926
927
// Test equivalence: sets flags from src1 XOR src2 (opcode 9); flags only.
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 9*B21 | S, src1, r0, src2);
}
931
932
// Compare: sets flags from src1 - src2 (opcode 10); flags only.
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 10*B21 | S, src1, r0, src2);
}
936
937
// Compare negative: sets flags from src1 + src2 (opcode 11); flags only.
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | 11*B21 | S, src1, r0, src2);
}
941
942
// dst = src1 OR src2 (data-processing opcode 12).
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 12*B21 | s, src1, dst, src2);
}
947
948
// dst = src (data-processing opcode 13); Rn is unused so r0 fills it.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  // A mov into pc transfers control; write out any recorded source
  // positions first (mirrors the handling in ldr below).
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int)
  // pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}
959
960
// Bit clear: dst = src1 AND NOT(src2) (data-processing opcode 14).
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 14*B21 | s, src1, dst, src2);
}
965
966
// Move negated: dst = NOT(src) (opcode 15); Rn unused, r0 fills it.
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | 15*B21 | s, r0, dst, src);
}
970
971
Andrei Popescu31002712010-02-23 13:46:05 +0000972// Multiply instructions.
// Multiply-accumulate: dst = src1 * src2 + srcA. pc is not allowed in
// any operand.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
979
980
// Multiply: dst = src1 * src2. pc is not allowed in any operand.
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
987
988
// Signed 64-bit multiply-accumulate: dstH:dstL += src1 * src2.
// The two destination registers must be distinct; pc is not allowed.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1000
1001
// Signed 64-bit multiply: dstH:dstL = src1 * src2.
// The two destination registers must be distinct; pc is not allowed.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1013
1014
// Unsigned 64-bit multiply-accumulate: dstH:dstL += src1 * src2.
// The two destination registers must be distinct; pc is not allowed.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1026
1027
// Unsigned 64-bit multiply: dstH:dstL = src1 * src2.
// The two destination registers must be distinct; pc is not allowed.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1039
1040
Andrei Popescu31002712010-02-23 13:46:05 +00001041// Miscellaneous arithmetic instructions.
// Count leading zeros: dst = number of leading zero bits in src.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}
1048
1049
Andrei Popescu31002712010-02-23 13:46:05 +00001050// Status register access instructions.
// Move status register (CPSR or SPSR, selected by s) into dst.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
1055
1056
// Move src into the status-register fields selected by the fields mask.
// If an immediate operand cannot be encoded as a rotated 8-bit value it
// is first loaded into ip from the constant pool and the call recurses
// with the register form.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1080
1081
Andrei Popescu31002712010-02-23 13:46:05 +00001082// Load/Store instructions.
// Load word: dst = MEM(src). A load into pc transfers control, so any
// recorded source positions are flushed first. Ends with a peephole that
// deletes a push/pop pair targeting the same register.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  // The guards ensure no label was bound and no reloc info was recorded
  // inside the two-instruction window, so rewinding pc_ is safe.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
    }
  }
}
1106
1107
// Store word: MEM(dst) = src. Ends with a peephole that rewrites a
// pop()/push(r) pair into a single plain store to [sp].
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  // ->  str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    // Replace the removed pair with an offset-addressed store to [sp].
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
1128
1129
// Load byte (zero-extended): dst = MEM8(src).
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1133
1134
// Store byte: MEM8(dst) = low byte of src.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1138
1139
// Load halfword (zero-extended): dst = MEM16(src).
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1143
1144
// Store halfword: MEM16(dst) = low halfword of src.
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1148
1149
// Load signed byte (sign-extended): dst = MEM8(src).
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1153
1154
// Load signed halfword (sign-extended): dst = MEM16(src).
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1158
1159
// Load doubleword into the register pair dst, dst+1. Register-offset
// addressing is not supported. On ARMv7 a single ldrd is emitted; on
// older architectures it falls back to two consecutive ldr instructions.
void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
  ASSERT(src.rm().is(no_reg));
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  addrmod3(cond | B7 | B6 | B4, dst, src);
#else
  // Fallback: load dst from src, then dst+1 from src+4.
  ldr(dst, src, cond);
  MemOperand src1(src);
  src1.set_offset(src1.offset() + 4);
  Register dst1(dst);
  dst1.code_ = dst1.code_ + 1;
  ldr(dst1, src1, cond);
#endif
}
1173
1174
// Store doubleword from the register pair src, src+1. Register-offset
// addressing is not supported. On ARMv7 a single strd is emitted; on
// older architectures it falls back to two consecutive str instructions.
void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
#else
  // Fallback: store src at dst, then src+1 at dst+4.
  str(src, dst, cond);
  MemOperand dst1(dst);
  dst1.set_offset(dst1.offset() + 4);
  Register src1(src);
  src1.code_ = src1.code_ + 1;
  str(src1, dst1, cond);
#endif
}
1188
Andrei Popescu31002712010-02-23 13:46:05 +00001189// Load/Store multiple instructions.
// Load multiple registers in dst from memory at base, using block
// addressing mode am.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1209
1210
// Store multiple registers in src to memory at base, using block
// addressing mode am.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1217
1218
Andrei Popescu31002712010-02-23 13:46:05 +00001219// Semaphore instructions.
// Atomic word swap: dst = MEM(base), MEM(base) = src.
// pc may not be used, and neither register may alias base.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
1226
1227
// Atomic byte swap: dst = MEM8(base), MEM8(base) = src.
// pc may not be used, and neither register may alias base.
void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
1237
1238
Andrei Popescu31002712010-02-23 13:46:05 +00001239// Exception-generating instructions and debugging support.
// Halt execution with a message. In the simulator build the message
// pointer is embedded directly in the instruction word; on hardware a
// breakpoint (v5+) or software interrupt is emitted and msg is unused.
void Assembler::stop(const char* msg) {
#ifndef __arm__
  // The simulator handles these special instructions and stops execution.
  // NOTE(review): the pointer is truncated into a 32-bit instruction word;
  // assumes a 32-bit simulator host — confirm if porting to 64-bit hosts.
  emit(15 << 28 | ((intptr_t) msg));
#else  // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  bkpt(0);
#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
  swi(0x9f0001);
#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif  // def __arm__
}
1252
1253
// Breakpoint with a 16-bit immediate, split into the 12-bit and 4-bit
// fields of the encoding.
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
}
1258
1259
// Software interrupt with a 24-bit immediate.
void Assembler::swi(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1264
1265
Andrei Popescu31002712010-02-23 13:46:05 +00001266// Coprocessor instructions.
// Coprocessor data processing: issue operation opcode_1/opcode_2 on
// coprocessor coproc with coprocessor registers crd, crn, crm.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
1278
1279
// Unconditional form of cdp (cond field = nv).
void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1288
1289
// Move rd from the ARM core to coprocessor coproc (register crn/crm,
// opcodes opcode_1/opcode_2).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1301
1302
// Unconditional form of mcr (cond field = nv).
void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1311
1312
// Move from coprocessor coproc to ARM core register rd (the L bit
// distinguishes this from mcr).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1324
1325
// Unconditional form of mrc (cond field = nv).
void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
}
1334
1335
// Load coprocessor register crd from memory (addressed form).
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1343
1344
// Load coprocessor register crd using unindexed addressing: base rn with
// an 8-bit coprocessor-defined option field.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1356
1357
// Unconditional form of ldc (cond field = nv).
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
}
1364
1365
// Unconditional form of unindexed ldc (cond field = nv).
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}
1373
1374
// Store coprocessor register crd to memory (addressed form).
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
1382
1383
// Store coprocessor register crd using unindexed addressing: base rn with
// an 8-bit coprocessor-defined option field.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1395
1396
1397void Assembler::stc2(Coprocessor
1398 coproc, CRegister crd,
1399 const MemOperand& dst,
1400 LFlag l) { // v5 and above
1401 stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1402}
1403
1404
// Unconditional form of unindexed stc (cond field = nv).
void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
}
1412
1413
Steve Blockd0582a62009-12-15 09:54:21 +00001414// Support for VFP.
// Load a double-precision VFP register from base + offset. The offset
// must be a non-negative multiple of 4 below 1024 (encoded as a word
// count in the low 8 bits).
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1429
1430
// Load a single-precision VFP register from base + offset. The offset
// must be a non-negative multiple of 4 below 1024.
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
       0xA*B8 | ((offset / 4) & 255));
}
1445
1446
// Store a double-precision VFP register to base + offset. The offset
// must be a non-negative multiple of 4 below 1024.
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1461
1462
// Move a core register pair into a double-precision VFP register.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
1476
1477
// Move a double-precision VFP register into a core register pair.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
1491
1492
// Move a core register into a single-precision VFP register; the Vn
// register number is split into its upper bits (19-16) and low bit (N).
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
}
1505
1506
// Move a single-precision VFP register into a core register; the Vn
// register number is split into its upper bits (19-16) and low bit (N).
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
}
1519
1520
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction:
// S32/U32 = signed/unsigned 32-bit integer, F32/F64 = single/double float.
enum VFPType { S32, U32, F32, F64 };
1524
1525
1526static bool IsSignedVFPType(VFPType type) {
1527 switch (type) {
1528 case S32:
1529 return true;
1530 case U32:
1531 return false;
1532 default:
1533 UNREACHABLE();
1534 return false;
1535 }
Steve Blockd0582a62009-12-15 09:54:21 +00001536}
1537
1538
Steve Block6ded16b2010-05-10 14:33:55 +01001539static bool IsIntegerVFPType(VFPType type) {
1540 switch (type) {
1541 case S32:
1542 case U32:
1543 return true;
1544 case F32:
1545 case F64:
1546 return false;
1547 default:
1548 UNREACHABLE();
1549 return false;
1550 }
1551}
1552
1553
1554static bool IsDoubleVFPType(VFPType type) {
1555 switch (type) {
1556 case F32:
1557 return false;
1558 case F64:
1559 return true;
1560 default:
1561 UNREACHABLE();
1562 return false;
1563 }
1564}
1565
1566
// Splits the binary representation of reg_code into the (Vm, M) encoding
// fields, where M is a single bit. With split_last_bit the code is split
// as Vm:M (single-precision registers: M is the lowest bit); otherwise it
// is split as M:Vm (double-precision registers: M is bit 4, Vm the low
// four bits).
static void SplitRegCode(bool split_last_bit,
                         int reg_code,
                         int* vm,
                         int* m) {
  *m = split_last_bit ? (reg_code & 0x1) : ((reg_code >> 4) & 0x1);
  *vm = split_last_bit ? (reg_code >> 1) : (reg_code & 0xF);
}
1581
1582
// Encode vcvt.src_type.dst_type instruction: returns the full instruction
// word converting register src_code of src_type into register dst_code of
// dst_type. Exactly one of the two sides may be an integer type for the
// int<->float form; otherwise it is the double<->single form.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        const Condition cond) {
  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, D, Vd, M, Vm, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float-to-integer: opc2 selects signedness of the result.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = 1;  // round towards zero
      // Integer results always live in single-precision registers, so the
      // destination is always split as Vd:D.
      SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
      SplitRegCode(true, dst_code, &Vd, &D);
    } else {
      // Integer-to-float: op selects signedness of the source.
      ASSERT(IsIntegerVFPType(src_type));

      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
      SplitRegCode(true, src_code, &Vm, &M);
      SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz, D, Vd, M, Vm;

    ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
    sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
    SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
1632
1633
// Convert signed 32-bit integer in src to double-precision float in dst.
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
}
1640
1641
// Convert signed 32-bit integer in src to single-precision float in dst.
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
}
1648
1649
// Convert unsigned 32-bit integer in src to double-precision float in dst.
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
}
1656
1657
// Convert double-precision float in src to signed 32-bit integer in dst.
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
}
1664
1665
// Convert double-precision float in src to unsigned 32-bit integer in dst.
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
}
1672
1673
// Convert single-precision float in src to double precision in dst.
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
}
1680
1681
// Convert double-precision float in src to single precision in dst.
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
}
1688
1689
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1703
1704
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
1718
1719
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1733
1734
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
1748
1749
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const SBit s,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // NOTE(review): the SBit parameter is not used in the encoding below —
  // confirm whether it is intentionally ignored by callers.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
1762
1763
// Move the VFP status register (FPSCR) into core register dst.
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
1772
1773
Andrei Popescu31002712010-02-23 13:46:05 +00001774// Pseudo instructions.
Steve Block6ded16b2010-05-10 14:33:55 +01001775void Assembler::nop(int type) {
1776 // This is mov rx, rx.
1777 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
1778 emit(al | 13*B21 | type*B12 | type);
1779}
1780
1781
Steve Blocka7e24c12009-10-30 11:49:00 +00001782void Assembler::lea(Register dst,
1783 const MemOperand& x,
1784 SBit s,
1785 Condition cond) {
1786 int am = x.am_;
1787 if (!x.rm_.is_valid()) {
Andrei Popescu31002712010-02-23 13:46:05 +00001788 // Immediate offset.
Steve Blocka7e24c12009-10-30 11:49:00 +00001789 if ((am & P) == 0) // post indexing
1790 mov(dst, Operand(x.rn_), s, cond);
1791 else if ((am & U) == 0) // negative indexing
1792 sub(dst, x.rn_, Operand(x.offset_), s, cond);
1793 else
1794 add(dst, x.rn_, Operand(x.offset_), s, cond);
1795 } else {
1796 // Register offset (shift_imm_ and shift_op_ are 0) or scaled
1797 // register offset the constructors make sure than both shift_imm_
1798 // and shift_op_ are initialized.
1799 ASSERT(!x.rm_.is(pc));
1800 if ((am & P) == 0) // post indexing
1801 mov(dst, Operand(x.rn_), s, cond);
1802 else if ((am & U) == 0) // negative indexing
1803 sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1804 else
1805 add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
1806 }
1807}
1808
1809
Steve Blockd0582a62009-12-15 09:54:21 +00001810bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
1811 uint32_t dummy1;
1812 uint32_t dummy2;
1813 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
1814}
1815
1816
1817void Assembler::BlockConstPoolFor(int instructions) {
1818 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
1819}
1820
1821
Andrei Popescu31002712010-02-23 13:46:05 +00001822// Debugging.
void Assembler::RecordJSReturn() {
  // Record a JS_RETURN reloc entry at the current pc, marking this point
  // as a JS function return site (see RelocInfo::JS_RETURN).
  WriteRecordedPositions();  // Flush buffered source positions first.
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
1828
1829
1830void Assembler::RecordComment(const char* msg) {
1831 if (FLAG_debug_code) {
1832 CheckBuffer();
1833 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1834 }
1835}
1836
1837
void Assembler::RecordPosition(int pos) {
  // Buffer the current source position; it is written to the reloc info
  // stream lazily by WriteRecordedPositions.
  if (pos == RelocInfo::kNoPosition) return;  // Nothing to record.
  ASSERT(pos >= 0);
  current_position_ = pos;
}
1843
1844
void Assembler::RecordStatementPosition(int pos) {
  // Buffer the current statement position; it is written to the reloc info
  // stream lazily by WriteRecordedPositions.
  if (pos == RelocInfo::kNoPosition) return;  // Nothing to record.
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}
1850
1851
1852void Assembler::WriteRecordedPositions() {
1853 // Write the statement position if it is different from what was written last
1854 // time.
1855 if (current_statement_position_ != written_statement_position_) {
1856 CheckBuffer();
1857 RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
1858 written_statement_position_ = current_statement_position_;
1859 }
1860
1861 // Write the position if it is different from what was written last time and
1862 // also different from the written statement position.
1863 if (current_position_ != written_position_ &&
1864 current_position_ != written_statement_position_) {
1865 CheckBuffer();
1866 RecordRelocInfo(RelocInfo::POSITION, current_position_);
1867 written_position_ = current_position_;
1868 }
1869}
1870
1871
void Assembler::GrowBuffer() {
  // Grow the code buffer and migrate everything that refers into it: the
  // instruction stream, the reloc info stream, pc_, the reloc writer and
  // the pending (pre-pool) relocation entries.
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: start at 4KB, double while below 1MB, then
  // grow linearly by 1MB to bound memory overhead for large code objects.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No size overflow.

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  // Reloc info is written downward from the end of the buffer, so its size
  // is the distance from the writer's position to the buffer end.
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data: instructions to the start of the new buffer, reloc info
  // to its end. pc_delta/rc_delta are the respective relocation distances.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries: their pc fields still point into
  // the old buffer and must be shifted by pc_delta. JS_RETURN entries are
  // deliberately left untouched (NOTE(review): confirm how their pc is
  // adjusted elsewhere).
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
1921
1922
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // Record relocation info at the current pc. Pool-eligible modes are
  // queued in prinfo_ for the next constant pool emission; the remaining
  // modes (JS_RETURN/comment/position) bypass the pool. All non-NONE
  // entries are also written to the reloc info stream.
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);  // Pending-reloc buffer must not overflow.
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      // In debug builds, mark serialization as no longer enableable so a
      // late Serializer::enable() is caught.
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !FLAG_debug_code) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    reloc_info_writer.Write(&rinfo);
  }
}
1954
1955
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Decide whether the pending constant pool entries must be emitted now,
  // and if so emit the pool: optional jump over it, a marker word, then
  // one word per pending entry, patching each recorded ldr/str [pc, #0]
  // with the correct offset. 'force_emit' demands emission; 'require_jump'
  // means a branch over the pool is needed if one is emitted.

  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
  // both checked here. Also, recursive calls to CheckConstPool are blocked by
  // no_const_pool_before_.
  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (const_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_const_pool_before_;
    }

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A3.1 Instruction set encoding. The low bits carry the entry count.
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries and back-patch the instructions that will
  // load them.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // The -8 accounts for the ARM pipeline: pc reads as instruction + 8.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      // Negative offset: clear the U (add) bit and store the magnitude.
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));  // Offset must fit the 12-bit immediate field.
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
2062
2063
2064} } // namespace v8::internal