blob: 8fdcf182163b2e9fb3b2eb6b01bcaca88c2e7071 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
Leon Clarked91b9f72010-01-27 17:25:45 +000033// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
35// Copyright 2010 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +000036
37#include "v8.h"
38
Leon Clarkef7060e22010-06-03 12:02:55 +010039#if defined(V8_TARGET_ARCH_ARM)
40
Steve Blocka7e24c12009-10-30 11:49:00 +000041#include "arm/assembler-arm-inl.h"
42#include "serialize.h"
43
44namespace v8 {
45namespace internal {
46
// Safe default is no features.
// Bitsets indexed by the CpuFeature enum (VFP3, ARMv7, ...):
//   supported_                - features code generation may rely on.
//   enabled_                  - features currently enabled via a Scope.
//   found_by_runtime_probing_ - features discovered at runtime (not implied
//                               by compiler flags or the platform).
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
51
Andrei Popescu402d9372010-02-26 13:31:12 +000052
#ifdef __arm__
// Returns the set of CPU features the compiler was configured to assume,
// as a bitset matching CpuFeatures::supported_.  Only meaningful when
// building natively on ARM.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
  answer |= 1u << VFP3;
#endif  // def CAN_USE_VFP_INSTRUCTIONS
  return answer;
}
#endif  // def __arm__
71
72
// Determines which CPU features may be used by generated code.
// |portable|: when true, code may be serialized into a snapshot, so only
// features implied by the platform/compiler are kept; runtime-probed
// features are recorded separately.  When false, probed features are
// treated as baseline and found_by_runtime_probing_ is cleared.
void CpuFeatures::Probe(bool portable) {
#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  if (portable && Serializer::enabled()) {
    // Snapshot code must run on any target CPU: restrict to features the
    // platform and compiler guarantee.
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    supported_ |= CpuFeaturesImpliedByCompiler();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }

  // Non-portable code never needs to distinguish probed features.
  if (!portable) found_by_runtime_probing_ = 0;
#endif
}
105
106
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// NOTE(review): a zero mask indicates no RelocInfo modes require a delta to
// be applied when code objects move — confirm against RelocInfo users.
const int RelocInfo::kApplyMask = 0;
111
112
Leon Clarkef7060e22010-06-03 12:02:55 +0100113bool RelocInfo::IsCodedSpecially() {
114 // The deserializer needs to know whether a pointer is specially coded. Being
115 // specially coded on ARM means that it is a movw/movt instruction. We don't
116 // generate those yet.
117 return false;
118}
119
120
121
Steve Blocka7e24c12009-10-30 11:49:00 +0000122void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
123 // Patch the code at the current address with the supplied instructions.
124 Instr* pc = reinterpret_cast<Instr*>(pc_);
125 Instr* instr = reinterpret_cast<Instr*>(instructions);
126 for (int i = 0; i < instruction_count; i++) {
127 *(pc + i) = *(instr + i);
128 }
129
130 // Indicate that code has changed.
131 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
132}
133
134
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not implemented on ARM; callers must not reach this path.
  UNIMPLEMENTED();
}
141
142
143// -----------------------------------------------------------------------------
144// Implementation of Operand and MemOperand
145// See assembler-arm-inl.h for inlined constructors
146
// Immediate operand referring to a V8 heap object (or Smi).  Heap objects
// are embedded via their handle location so the GC can relocate them; Smis
// are encoded directly and need no relocation.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    // Embed the handle slot address; recorded as EMBEDDED_OBJECT so the
    // serializer/GC can find and update it.
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
161
162
// Operand of the form <rm>, <shift_op> #<shift_imm>.  RRX is accepted as a
// pseudo shift op and encoded as ROR with a zero immediate.
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}
177
178
179Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
180 ASSERT(shift_op != RRX);
181 rm_ = rm;
182 rs_ = no_reg;
183 shift_op_ = shift_op;
184 rs_ = rs;
185}
186
187
// [rn, #offset] with addressing mode am (offset, pre- or post-indexed).
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;  // immediate-offset form: no index register
  offset_ = offset;
  am_ = am;
}
194
// [rn, rm] with addressing mode am; index register without a shift.
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;   // unshifted index is encoded as LSL #0
  shift_imm_ = 0;
  am_ = am;
}
202
203
// [rn, rm, <shift_op> #shift_imm] with addressing mode am.
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
213
214
215// -----------------------------------------------------------------------------
Andrei Popescu31002712010-02-23 13:46:05 +0000216// Implementation of Assembler.
Steve Blocka7e24c12009-10-30 11:49:00 +0000217
Andrei Popescu31002712010-02-23 13:46:05 +0000218// Instruction encoding bits.
// Single-bit fields and masks of the ARM instruction encoding.  Note that
// several names alias the same bit (e.g. L and S both name bit 20); which
// meaning applies depends on the instruction class being assembled.
enum {
  H = 1 << 5,   // halfword (or byte)
  S6 = 1 << 6,  // signed (or unsigned)
  L = 1 << 20,  // load (or store)
  S = 1 << 20,  // set condition code (or leave unchanged)
  W = 1 << 21,  // writeback base register (or leave unchanged)
  A = 1 << 21,  // accumulate in multiply instruction (or not)
  B = 1 << 22,  // unsigned byte (or word)
  N = 1 << 22,  // long (or short)
  U = 1 << 23,  // positive (or negative) offset/index
  P = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I = 1 << 25,  // immediate shifter operand (or not)

  // Raw bit positions used when composing instruction patterns below.
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask = 15 << 12,  // in str instruction
  CondMask = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask = (1 << 24) - 1,
  Off12Mask = (1 << 12) - 1,
  // Reserved condition.
  nv = 15 << 28
};
261
262
// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
// Masks, patterns and opcode-flip constants used by fits_shifter() to
// rewrite an instruction to its complementary form (mov<->mvn, cmp<->cmn,
// add<->sub, and<->bic, mov->movw) when the complementary immediate fits.
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kALUMask = 0x6f * B21;
const Instr kAddPattern = 0x4 * B21;
const Instr kSubPattern = 0x2 * B21;
const Instr kBicPattern = 0xe * B21;
const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
// ldr/str templates with fp as base, used by the peephole optimizer to
// recognize frame-slot accesses.
static const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | fp.code() * B16;
static const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | fp.code() * B16;
static const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
static const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | fp.code() * B16;
static const Instr kLdrStrInstrTypeMask = 0xffff0000;
static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
static const Instr kLdrStrOffsetMask = 0x00000fff;

// Spare buffer.
// One minimal-sized buffer is cached here (by ~Assembler) so that repeated
// small assemblies avoid reallocations.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
321
// Creates an assembler writing into |buffer| of |buffer_size| bytes.  When
// buffer is NULL the assembler owns its buffer: it reuses the cached
// spare_buffer_ for minimal sizes or allocates a fresh one.  An externally
// supplied buffer is never freed by the assembler.
Assembler::Assembler(void* buffer, int buffer_size)
    : positions_recorder_(this),
      allow_peephole_optimization_(false) {
  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
  allow_peephole_optimization_ = FLAG_peephole_optimization;
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers.
  // Code grows forward from buffer_; relocation info grows backward from
  // the end of the buffer.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
}
364
365
// Releases an owned buffer, caching a minimal-sized one in spare_buffer_
// for reuse by the next Assembler.  Externally owned buffers are untouched.
Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}
376
377
// Finalizes assembly and fills in |desc| with the buffer layout: emitted
// instructions at the front, relocation info at the back of the buffer.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
389
390
391void Assembler::Align(int m) {
392 ASSERT(m >= 4 && IsPowerOf2(m));
393 while ((pc_offset() & (m - 1)) != 0) {
394 nop();
395 }
396}
397
398
void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}
403
404
Steve Block6ded16b2010-05-10 14:33:55 +0100405bool Assembler::IsBranch(Instr instr) {
406 return (instr & (B27 | B25)) == (B27 | B25);
407}
408
409
410int Assembler::GetBranchOffset(Instr instr) {
411 ASSERT(IsBranch(instr));
412 // Take the jump offset in the lower 24 bits, sign extend it and multiply it
413 // with 4 to get the offset in bytes.
414 return ((instr & Imm24Mask) << 8) >> 6;
415}
416
417
418bool Assembler::IsLdrRegisterImmediate(Instr instr) {
419 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
420}
421
422
423int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
424 ASSERT(IsLdrRegisterImmediate(instr));
425 bool positive = (instr & B23) == B23;
426 int offset = instr & Off12Mask; // Zero extended offset.
427 return positive ? offset : -offset;
428}
429
430
431Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
432 ASSERT(IsLdrRegisterImmediate(instr));
433 bool positive = offset >= 0;
434 if (!positive) offset = -offset;
435 ASSERT(is_uint12(offset));
436 // Set bit indicating whether the offset should be added.
437 instr = (instr & ~B23) | (positive ? B23 : 0);
438 // Set the actual offset.
439 return (instr & ~Off12Mask) | offset;
440}
441
442
Kristian Monsen50ef84f2010-07-29 15:18:00 +0100443bool Assembler::IsStrRegisterImmediate(Instr instr) {
444 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
445}
446
447
448Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
449 ASSERT(IsStrRegisterImmediate(instr));
450 bool positive = offset >= 0;
451 if (!positive) offset = -offset;
452 ASSERT(is_uint12(offset));
453 // Set bit indicating whether the offset should be added.
454 instr = (instr & ~B23) | (positive ? B23 : 0);
455 // Set the actual offset.
456 return (instr & ~Off12Mask) | offset;
457}
458
459
460bool Assembler::IsAddRegisterImmediate(Instr instr) {
461 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
462}
463
464
465Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
466 ASSERT(IsAddRegisterImmediate(instr));
467 ASSERT(offset >= 0);
468 ASSERT(is_uint12(offset));
469 // Set the offset.
470 return (instr & ~Off12Mask) | offset;
471}
472
473
Leon Clarkef7060e22010-06-03 12:02:55 +0100474Register Assembler::GetRd(Instr instr) {
475 Register reg;
476 reg.code_ = ((instr & kRdMask) >> kRdShift);
477 return reg;
478}
479
480
481bool Assembler::IsPush(Instr instr) {
482 return ((instr & ~kRdMask) == kPushRegPattern);
483}
484
485
486bool Assembler::IsPop(Instr instr) {
487 return ((instr & ~kRdMask) == kPopRegPattern);
488}
489
490
491bool Assembler::IsStrRegFpOffset(Instr instr) {
492 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
493}
494
495
496bool Assembler::IsLdrRegFpOffset(Instr instr) {
497 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
498}
499
500
501bool Assembler::IsStrRegFpNegOffset(Instr instr) {
502 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
503}
504
505
506bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
507 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
508}
509
510
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800511bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
512 // Check the instruction is indeed a
513 // ldr<cond> <Rd>, [pc +/- offset_12].
514 return (instr & 0x0f7f0000) == 0x051f0000;
515}
516
517
Steve Blocka7e24c12009-10-30 11:49:00 +0000518// Labels refer to positions in the (to be) generated code.
519// There are bound, linked, and unused labels.
520//
521// Bound labels refer to known positions in the already
522// generated code. pos() is the position the label refers to.
523//
524// Linked labels refer to unknown positions in the code
525// to be generated; pos() is the position of the last
526// instruction using the label.
527
528
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
531
532
// Returns the code position a link at |pos| points to: either the next
// entry in a label's link chain or kEndOfChain.  The link is stored either
// in an emitted label constant or in the imm24 field of a b/bl/blx.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend imm24 and convert words to bytes (net shift left by 2).
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
547
548
// Resolves the link at |pos| to point to |target_pos|, patching either an
// emitted label constant or the imm24 field of a b/bl/blx instruction.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  int imm24 = imm26 >> 2;  // byte offset back to a word offset
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}
572
573
// Debug helper: prints the state of label L and, for a linked label, walks
// its link chain printing each linking instruction (branch mnemonic with
// condition suffix, or "value" for an emitted label constant).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // iterate over a copy so L itself is not consumed
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
630
631
// Binds label L to code position |pos|, back-patching every instruction in
// its link chain to target that position.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
646
647
// Merges appendix's link chain into L's chain (appending it at the end of
// L's list) and marks appendix unused.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      // Walk to the end of L's chain (terminated by kEndOfChain).
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
667
668
669void Assembler::bind(Label* L) {
670 ASSERT(!L->is_bound()); // label can only be bound once
671 bind_to(L, pc_offset());
672}
673
674
675void Assembler::next(Label* L) {
676 ASSERT(L->is_linked());
677 int link = target_at(L->pos());
678 if (link > 0) {
679 L->link_to(link);
680 } else {
681 ASSERT(link == kEndOfChain);
682 L->Unuse();
683 }
684}
685
686
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100687static Instr EncodeMovwImmediate(uint32_t immediate) {
688 ASSERT(immediate < 0x10000);
689 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
690}
691
692
Andrei Popescu31002712010-02-23 13:46:05 +0000693// Low-level code emission routines depending on the addressing mode.
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100694// If this returns true then you have to use the rotate_imm and immed_8
695// that it returns, because it may have already changed the instruction
696// to match them!
Steve Blocka7e24c12009-10-30 11:49:00 +0000697static bool fits_shifter(uint32_t imm32,
698 uint32_t* rotate_imm,
699 uint32_t* immed_8,
700 Instr* instr) {
Andrei Popescu31002712010-02-23 13:46:05 +0000701 // imm32 must be unsigned.
Steve Blocka7e24c12009-10-30 11:49:00 +0000702 for (int rot = 0; rot < 16; rot++) {
703 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
704 if ((imm8 <= 0xff)) {
705 *rotate_imm = rot;
706 *immed_8 = imm8;
707 return true;
708 }
709 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100710 // If the opcode is one with a complementary version and the complementary
711 // immediate fits, change the opcode.
712 if (instr != NULL) {
713 if ((*instr & kMovMvnMask) == kMovMvnPattern) {
714 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
715 *instr ^= kMovMvnFlip;
716 return true;
717 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
718 if (CpuFeatures::IsSupported(ARMv7)) {
719 if (imm32 < 0x10000) {
720 *instr ^= kMovwLeaveCCFlip;
721 *instr |= EncodeMovwImmediate(imm32);
722 *rotate_imm = *immed_8 = 0; // Not used for movw.
723 return true;
724 }
725 }
726 }
727 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
728 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
729 *instr ^= kCmpCmnFlip;
730 return true;
731 }
732 } else {
733 Instr alu_insn = (*instr & kALUMask);
734 if (alu_insn == kAddPattern ||
735 alu_insn == kSubPattern) {
736 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
737 *instr ^= kAddSubFlip;
738 return true;
739 }
740 } else if (alu_insn == kAndPattern ||
741 alu_insn == kBicPattern) {
742 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
743 *instr ^= kAndBicFlip;
744 return true;
745 }
746 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000747 }
748 }
749 return false;
750}
751
752
753// We have to use the temporary register for things that can be relocated even
754// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
755// space. There is no guarantee that the relocated location can be similarly
756// encoded.
// Returns true when this immediate operand must be materialized through the
// constant pool even if it would fit in a 12-bit shifter operand: any
// relocatable value might not be similarly encodable after relocation.
bool Operand::must_use_constant_pool() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    // External references only need the pool when serialization can occur.
    return Serializer::enabled();
  } else if (rmode_ == RelocInfo::NONE) {
    return false;
  }
  return true;
}
770
771
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100772bool Operand::is_single_instruction() const {
773 if (rm_.is_valid()) return true;
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800774 if (must_use_constant_pool()) return false;
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100775 uint32_t dummy1, dummy2;
776 return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
777}
778
779
// Emits a data-processing instruction (addressing mode 1).  Immediates that
// cannot be encoded as a shifter operand are materialized via movw/movt, a
// constant pool load into rd (for plain mov), or a load into ip followed by
// a recursive re-emit using ip as the operand.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_use_constant_pool() ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(rd, MemOperand(pc, 0), cond);
        } else {
          // Will probably use movw, will certainly not use constant pool.
          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
        }
      } else {
        // If this is not a mov or mvn instruction we may still be able to avoid
        // a constant pool entry by using mvn or movw.
        if (!x.must_use_constant_pool() &&
            (instr & kMovMvnMask) != kMovMvnPattern) {
          mov(ip, x, LeaveCC, cond);
        } else {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(ip, MemOperand(pc, 0), cond);
        }
        // Re-emit the original instruction with ip as the operand.
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
}
836
837
// Emits a word/byte load or store (addressing mode 2).  Immediate offsets
// that do not fit in 12 bits are first loaded into ip and the access is
// re-emitted with ip as the index register.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;  // flip the up/down bit instead of encoding a sign
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
869
870
// Addressing mode 3 (halfword/signed-byte/doubleword load-store): encodes
// an 8-bit immediate offset (split into two nibbles) or a plain register
// offset. Scaled register offsets are not supported by this mode and are
// materialized in ip first, as are immediates that do not fit in 8 bits.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset. Negative offsets flip the U bit as in addrmod2.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip;
      // rn (and rd in a load) should never be ip, or it will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split: high nibble in bits 8-11, low in bits 0-3.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported by mode 3, load index first;
    // rn (and rd in a load) should never be ip, or it will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
909
910
911void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
912 ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
913 ASSERT(rl != 0);
914 ASSERT(!rn.is(pc));
915 emit(instr | rn.code()*B16 | rl);
916}
917
918
// Addressing mode 5 (coprocessor load-store): encodes an 8-bit unsigned
// *word* offset (the byte offset must be word-aligned and fit in 10 bits).
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // encode as a word count, not a byte count
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;  // negative offsets are positive magnitudes with U flipped
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
942
943
// Returns the pc-relative branch offset (in bytes, pipeline-adjusted by
// kPcLoadDelta) to label L, linking this site into L's chain when L is not
// yet bound. Also blocks constant-pool emission for one instruction so the
// branch lands exactly at the recorded pc offset.
// NOTE(review): jump_elimination_allowed is currently unused here —
// presumably reserved for callers/future use; confirm before removing.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link: previous use site in the chain
    } else {
      target_pos = kEndOfChain;  // first use of this label
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
962
963
964void Assembler::label_at_put(Label* L, int at_offset) {
965 int target_pos;
966 if (L->is_bound()) {
967 target_pos = L->pos();
968 } else {
969 if (L->is_linked()) {
970 target_pos = L->pos(); // L's link
971 } else {
972 target_pos = kEndOfChain;
973 }
974 L->link_to(at_offset);
975 instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
976 }
977}
978
979
Andrei Popescu31002712010-02-23 13:46:05 +0000980// Branch instructions.
// Branch: emits B with a signed 24-bit word offset (byte offset must be
// word-aligned and fit in 26 signed bits).
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al) {
    // Dead code after an unconditional branch is a good location to emit
    // the constant pool.
    CheckConstPool(false, false);
  }
}
992
993
// Branch with link: records the current source position for the debugger
// (this is a call site), then emits BL with a signed 24-bit word offset.
void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
}
1001
1002
// Branch with link and exchange (immediate form): unconditional (cond field
// is nv/1111); the H bit carries bit 1 of the halfword-aligned offset so
// Thumb targets can be reached.
void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(nv | B27 | B25 | h | (imm24 & Imm24Mask));
}
1011
1012
// Branch with link and exchange (register form): calls through |target|.
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}
1018
1019
// Branch and exchange: jumps through |target| (may switch to Thumb state).
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}
1025
1026
Andrei Popescu31002712010-02-23 13:46:05 +00001027// Data-processing instructions.
1028
Steve Blocka7e24c12009-10-30 11:49:00 +00001029void Assembler::and_(Register dst, Register src1, const Operand& src2,
1030 SBit s, Condition cond) {
1031 addrmod1(cond | 0*B21 | s, src1, dst, src2);
1032}
1033
1034
1035void Assembler::eor(Register dst, Register src1, const Operand& src2,
1036 SBit s, Condition cond) {
1037 addrmod1(cond | 1*B21 | s, src1, dst, src2);
1038}
1039
1040
1041void Assembler::sub(Register dst, Register src1, const Operand& src2,
1042 SBit s, Condition cond) {
1043 addrmod1(cond | 2*B21 | s, src1, dst, src2);
1044}
1045
1046
1047void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1048 SBit s, Condition cond) {
1049 addrmod1(cond | 3*B21 | s, src1, dst, src2);
1050}
1051
1052
// ADD: dst = src1 + src2, data-processing opcode 4. After emitting, runs a
// peephole pass: a push immediately followed by an sp adjustment (i.e. a
// pop of nothing) cancels out and both instructions are deleted.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;  // rewind over both instructions
    if (FLAG_print_peephole_optimization) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
1071
1072
1073void Assembler::adc(Register dst, Register src1, const Operand& src2,
1074 SBit s, Condition cond) {
1075 addrmod1(cond | 5*B21 | s, src1, dst, src2);
1076}
1077
1078
1079void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1080 SBit s, Condition cond) {
1081 addrmod1(cond | 6*B21 | s, src1, dst, src2);
1082}
1083
1084
1085void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1086 SBit s, Condition cond) {
1087 addrmod1(cond | 7*B21 | s, src1, dst, src2);
1088}
1089
1090
1091void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1092 addrmod1(cond | 8*B21 | S, src1, r0, src2);
1093}
1094
1095
1096void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1097 addrmod1(cond | 9*B21 | S, src1, r0, src2);
1098}
1099
1100
1101void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1102 addrmod1(cond | 10*B21 | S, src1, r0, src2);
1103}
1104
1105
1106void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1107 addrmod1(cond | 11*B21 | S, src1, r0, src2);
1108}
1109
1110
1111void Assembler::orr(Register dst, Register src1, const Operand& src2,
1112 SBit s, Condition cond) {
1113 addrmod1(cond | 12*B21 | s, src1, dst, src2);
1114}
1115
1116
// MOV: dst = src, data-processing opcode 13 (rn unused, encoded as r0).
// A mov into pc is a branch, so the source position is recorded first.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | 13*B21 | s, r0, dst, src);
}
1127
1128
// MOVW pseudo-instruction: loads a 16-bit immediate into |reg|.
// NOTE(review): this is currently synthesized via a plain mov (which may
// encode the immediate directly or fall back to the constant pool) rather
// than emitting the real ARMv7 MOVW encoding — confirm this is intended.
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  mov(reg, Operand(immediate), LeaveCC, cond);
}
1133
1134
// MOVT (ARMv7): writes the 16-bit immediate into the top half of |reg|,
// leaving the bottom half unchanged. The immediate range is presumably
// checked inside EncodeMovwImmediate (not visible here).
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1138
1139
Steve Blocka7e24c12009-10-30 11:49:00 +00001140void Assembler::bic(Register dst, Register src1, const Operand& src2,
1141 SBit s, Condition cond) {
1142 addrmod1(cond | 14*B21 | s, src1, dst, src2);
1143}
1144
1145
1146void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1147 addrmod1(cond | 15*B21 | s, r0, dst, src);
1148}
1149
1150
Andrei Popescu31002712010-02-23 13:46:05 +00001151// Multiply instructions.
// MLA: multiply-accumulate, dst = src1 * src2 + srcA. None of the operands
// may be pc.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1158
1159
1160void Assembler::mul(Register dst, Register src1, Register src2,
1161 SBit s, Condition cond) {
1162 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1163 // dst goes in bits 16-19 for this instruction!
1164 emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
1165}
1166
1167
// SMLAL: signed 64-bit multiply-accumulate, dstH:dstL += src1 * src2.
// The destination halves must be distinct registers and no operand may
// be pc.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1179
1180
// SMULL: signed 64-bit multiply, dstH:dstL = src1 * src2. The destination
// halves must be distinct registers and no operand may be pc.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1192
1193
// UMLAL: unsigned 64-bit multiply-accumulate, dstH:dstL += src1 * src2.
// The destination halves must be distinct registers and no operand may
// be pc.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1205
1206
// UMULL: unsigned 64-bit multiply, dstH:dstL = src1 * src2. The destination
// halves must be distinct registers and no operand may be pc.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1218
1219
Andrei Popescu31002712010-02-23 13:46:05 +00001220// Miscellaneous arithmetic instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001221void Assembler::clz(Register dst, Register src, Condition cond) {
1222 // v5 and above.
1223 ASSERT(!dst.is(pc) && !src.is(pc));
1224 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1225 15*B8 | B4 | src.code());
1226}
1227
1228
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001229// Saturating instructions.
1230
1231// Unsigned saturate.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  // NOTE(review): USAT is architecturally ARMv6, but the check below gates
  // on the ARMv7 feature flag — presumably the conservative project-level
  // capability; confirm before relaxing.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  // Only an immediate-shifted register operand is encodable (ASR or LSL).
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));

  // sh selects the shift type in the encoding: 0 = LSL, 1 = ASR.
  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
1251
1252
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001253// Bitfield manipulation instructions.
1254
1255// Unsigned bit field extract.
1256// Extracts #width adjacent bits from position #lsb in a register, and
1257// writes them to the low bits of a destination register.
1258// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The encoding stores width - 1 (the "widthminus1" field) in bits 16-20.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1272
1273
1274// Signed bit field extract.
1275// Extracts #width adjacent bits from position #lsb in a register, and
1276// writes them to the low bits of a destination register. The extracted
1277// value is sign extended to fill the destination register.
1278// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // Same layout as ubfx but without B22 (the U bit): width - 1 in bits 16-20.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1292
1293
1294// Bit field clear.
1295// Sets #width adjacent bits at position #lsb in the destination register
1296// to zero, preserving the value of the other bits.
1297// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The encoding stores the inclusive msb, not the width; rn = 0xf marks
  // the clear form (bfc) as opposed to the insert form (bfi).
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
1307
1308
1309// Bit field insert.
1310// Inserts #width adjacent bits from the low bits of the source register
1311// into position #lsb of the destination register.
1312// bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // Same layout as bfc but with the source register in place of the 0xf
  // marker; the encoding stores the inclusive msb, not the width.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
1327
1328
Andrei Popescu31002712010-02-23 13:46:05 +00001329// Status register access instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001330void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1331 ASSERT(!dst.is(pc));
1332 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1333}
1334
1335
// MSR: writes an immediate or a register into the selected fields of a
// status register. Immediates that cannot be expressed as a rotated 8-bit
// value are loaded into ip from the constant pool first, then the
// operation retries in register form.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_use_constant_pool() ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1359
1360
Andrei Popescu31002712010-02-23 13:46:05 +00001361// Load/Store instructions.
// LDR: loads a word from |src| into |dst| (addressing mode 2). A load into
// pc is a branch, so the source position is recorded first. After emitting,
// three peephole passes inspect the instructions just emitted (pc_ is moved
// backwards to delete them and replacements are re-emitted):
//   1. push(ry); pop(rx)            -> nothing (rx == ry) or mov rx, ry
//   2. str rx,[fp,#off]; ldr ry,[fp,#off]
//                                   -> keep the str, drop/replace the ldr
//   3. push(rx); ldr rz,[fp,#off]; pop(ry)
//                                   -> equivalent mov/ldr sequence
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(ry), pop(rx)
  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated if ry = rx.
  // If ry != rx, a register copy from ry to rx is inserted
  // after eliminating the push and the pop instructions.
  if (can_peephole_optimize(2)) {
    Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);

    if (IsPush(push_instr) && IsPop(pop_instr)) {
      if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
        // For consecutive push and pop on different registers,
        // we delete both the push & pop and insert a register move.
        // push ry, pop rx --> mov rx, ry
        Register reg_pushed, reg_popped;
        reg_pushed = GetRd(push_instr);
        reg_popped = GetRd(pop_instr);
        pc_ -= 2 * kInstrSize;
        // Insert a mov instruction, which is better than a pair of push & pop.
        mov(reg_popped, reg_pushed);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
                 pc_offset());
        }
      } else {
        // For consecutive push and pop on the same register,
        // both the push and the pop can be deleted.
        pc_ -= 2 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(2)) {
    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);

    if ((IsStrRegFpOffset(str_instr) &&
         IsLdrRegFpOffset(ldr_instr)) ||
       (IsStrRegFpNegOffset(str_instr) &&
         IsLdrRegFpNegOffset(ldr_instr))) {
      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
            (str_instr & kLdrStrInstrArgumentMask)) {
        // Pattern: Ldr/str same fp+offset, same register.
        //
        // The following:
        //   str rx, [fp, #-12]
        //   ldr rx, [fp, #-12]
        //
        // Becomes:
        //   str rx, [fp, #-12]

        pc_ -= 1 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
        }
      } else if ((ldr_instr & kLdrStrOffsetMask) ==
                 (str_instr & kLdrStrOffsetMask)) {
        // Pattern: Ldr/str same fp+offset, different register.
        //
        // The following:
        //   str rx, [fp, #-12]
        //   ldr ry, [fp, #-12]
        //
        // Becomes:
        //   str rx, [fp, #-12]
        //   mov ry, rx

        Register reg_stored, reg_loaded;
        reg_stored = GetRd(str_instr);
        reg_loaded = GetRd(ldr_instr);
        pc_ -= 1 * kInstrSize;
        // Insert a mov instruction, which is better than ldr.
        mov(reg_loaded, reg_stored);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(3)) {
    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
    if (IsPush(mem_write_instr) &&
        IsPop(mem_read_instr)) {
      if ((IsLdrRegFpOffset(ldr_instr) ||
        IsLdrRegFpNegOffset(ldr_instr))) {
        if ((mem_write_instr & kRdMask) ==
              (mem_read_instr & kRdMask)) {
          // Pattern: push & pop from/to same register,
          // with a fp+offset ldr in between
          //
          // The following:
          //   str rx, [sp, #-4]!
          //   ldr rz, [fp, #-24]
          //   ldr rx, [sp], #+4
          //
          // Becomes:
          //   if(rx == rz)
          //     delete all
          //   else
          //     ldr rz, [fp, #-24]

          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
            pc_ -= 3 * kInstrSize;
          } else {
            pc_ -= 3 * kInstrSize;
            // Reinsert back the ldr rz.
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
          }
        } else {
          // Pattern: push & pop from/to different registers
          // with a fp+offset ldr in between
          //
          // The following:
          //   str rx, [sp, #-4]!
          //   ldr rz, [fp, #-24]
          //   ldr ry, [sp], #+4
          //
          // Becomes:
          //   if(ry == rz)
          //     mov ry, rx;
          //   else if(rx != rz)
          //     ldr rz, [fp, #-24]
          //     mov ry, rx
          //   else if((ry != rz) || (rx == rz)) becomes:
          //     mov ry, rx
          //     ldr rz, [fp, #-24]

          Register reg_pushed, reg_popped;
          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
          } else if ((mem_write_instr & kRdMask)
                                != (ldr_instr & kRdMask)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            emit(ldr_instr);
            mov(reg_popped, reg_pushed);
          } else if (((mem_read_instr & kRdMask)
                                     != (ldr_instr & kRdMask)) ||
                    ((mem_write_instr & kRdMask)
                                     == (ldr_instr & kRdMask)) ) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
          }
        }
      }
    }
  }
}
1536
1537
// STR: stores |src| to |dst| (addressing mode 2). After emitting, a
// peephole pass collapses an sp pop immediately followed by a push of the
// same size into a single store at [sp, #0].
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  // ->  str r, [sp, 0], al
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;  // delete both instructions
    // Emit the replacement: str src, [sp, #0].
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_peephole_optimization) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
1555
1556
1557void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1558 addrmod2(cond | B26 | B | L, dst, src);
1559}
1560
1561
1562void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1563 addrmod2(cond | B26 | B, src, dst);
1564}
1565
1566
1567void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1568 addrmod3(cond | L | B7 | H | B4, dst, src);
1569}
1570
1571
1572void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1573 addrmod3(cond | B7 | H | B4, src, dst);
1574}
1575
1576
1577void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1578 addrmod3(cond | L | B7 | S6 | B4, dst, src);
1579}
1580
1581
1582void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1583 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1584}
1585
1586
// LDRD (ARMv7): loads a doubleword into the even/odd register pair
// dst1/dst2. Only immediate offsets are supported (no register offset),
// dst1 must be even and dst2 must be the next register.
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));  // register offsets are not encodable here
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1596
1597
// STRD (ARMv7): stores the even/odd register pair src1/src2 as a
// doubleword. Same restrictions as ldrd: immediate offset only, src1 even,
// src2 = src1 + 1. (NOTE(review): the ARMv7 feature assert is last here
// but first in ldrd — harmless, but worth aligning.)
void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));  // register offsets are not encodable here
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
1607
Andrei Popescu31002712010-02-23 13:46:05 +00001608// Load/Store multiple instructions.
// LDM: loads multiple registers (addressing mode 4). An unconditional ldm
// that writes pc is a function return, which is a good point to flush the
// constant pool.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1628
1629
// STM: stores multiple registers (addressing mode 4).
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1636
1637
Andrei Popescu31002712010-02-23 13:46:05 +00001638// Exception-generating instructions and debugging support.
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001639// Stops with a non-negative code less than kNumOfWatchedStops support
1640// enabling/disabling and a counter feature. See simulator-arm.h .
// Emits a stop: under the simulator (non-ARM host) this is an svc carrying
// a stop code, followed by a data word holding the message pointer; on real
// ARM hardware it degrades to a breakpoint (or a semihosting-style svc on
// pre-v5 targets) and the message is dropped.
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
  // Simulator do not share constants declaration.
  ASSERT(code >= kDefaultStopCode);
  static const uint32_t kStopInterruptCode = 1 << 23;
  static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
  // The Simulator will handle the stop instruction and get the message
  // address. It expects to find the address just after the svc instruction,
  // so the two words must stay adjacent — block the constant pool for both.
  // NOTE(review): reinterpret_cast of the pointer to Instr assumes a 32-bit
  // host pointer; confirm for 64-bit simulator builds.
  BlockConstPoolFor(2);
  if (code >= 0) {
    svc(kStopInterruptCode + code, cond);
  } else {
    svc(kStopInterruptCode + kMaxStopCode, cond);
  }
  emit(reinterpret_cast<Instr>(msg));
#else  // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  ASSERT(cond == al);
  bkpt(0);
#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
  svc(0x9f0001, cond);
#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif  // def __arm__
}
1666
1667
1668void Assembler::bkpt(uint32_t imm16) { // v5 and above
1669 ASSERT(is_uint16(imm16));
1670 emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
1671}
1672
1673
// SVC (software interrupt) with a 24-bit comment field.
void Assembler::svc(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1678
1679
Andrei Popescu31002712010-02-23 13:46:05 +00001680// Coprocessor instructions.
// CDP: coprocessor data processing. opcode_1 is 4 bits, opcode_2 is 3 bits.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
1692
1693
1694void Assembler::cdp2(Coprocessor coproc,
1695 int opcode_1,
1696 CRegister crd,
1697 CRegister crn,
1698 CRegister crm,
1699 int opcode_2) { // v5 and above
1700 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
1701}
1702
1703
// Move to coprocessor from ARM core register (MCR): transfer rd into the
// coprocessor, addressed by crn/crm/opcodes. Bit 20 (L) is clear, marking
// the core-to-coprocessor direction.
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1715
1716
1717void Assembler::mcr2(Coprocessor coproc,
1718 int opcode_1,
1719 Register rd,
1720 CRegister crn,
1721 CRegister crm,
1722 int opcode_2) { // v5 and above
1723 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1724}
1725
1726
// Move to ARM core register from coprocessor (MRC): transfer a coprocessor
// value into rd. Identical to mcr except that bit 20 (L) is set, marking
// the coprocessor-to-core direction.
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1738
1739
1740void Assembler::mrc2(Coprocessor coproc,
1741 int opcode_1,
1742 Register rd,
1743 CRegister crn,
1744 CRegister crm,
1745 int opcode_2) { // v5 and above
1746 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
1747}
1748
1749
// Load coprocessor register crd from memory (LDC), with the address formed
// by addressing mode 5 from 'src'. Bit 20 (L) marks the load direction;
// 'l' selects the long-transfer form.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1757
1758
// LDC with unindexed addressing: rn is the base register and the 8-bit
// 'option' field is passed through uninterpreted to the coprocessor.
// The U (add) bit is always set in this form.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1770
1771
1772void Assembler::ldc2(Coprocessor coproc,
1773 CRegister crd,
1774 const MemOperand& src,
1775 LFlag l) { // v5 and above
1776 ldc(coproc, crd, src, l, static_cast<Condition>(nv));
1777}
1778
1779
1780void Assembler::ldc2(Coprocessor coproc,
1781 CRegister crd,
1782 Register rn,
1783 int option,
1784 LFlag l) { // v5 and above
1785 ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
1786}
1787
1788
// Store coprocessor register crd to memory (STC), with the address formed
// by addressing mode 5 from 'dst'. Bit 20 (L) is clear, marking the store
// direction; 'l' selects the long-transfer form.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
1796
1797
// STC with unindexed addressing: rn is the base register and the 8-bit
// 'option' field is passed through uninterpreted to the coprocessor.
// The U (add) bit is always set in this form.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1809
1810
1811void Assembler::stc2(Coprocessor
1812 coproc, CRegister crd,
1813 const MemOperand& dst,
1814 LFlag l) { // v5 and above
1815 stc(coproc, crd, dst, l, static_cast<Condition>(nv));
1816}
1817
1818
1819void Assembler::stc2(Coprocessor coproc,
1820 CRegister crd,
1821 Register rn,
1822 int option,
1823 LFlag l) { // v5 and above
1824 stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
1825}
1826
1827
Steve Blockd0582a62009-12-15 09:54:21 +00001828// Support for VFP.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001829
// Load a double-precision VFP register from memory at Rbase + offset.
// A negative offset is encoded by clearing the U (add) bit and using the
// magnitude; the magnitude must be word-aligned and fit in 8 bits after
// dividing by 4 (i.e. |offset| < 1024).
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    // Encode the magnitude; direction is carried by the U bit.
    offset = -offset;
    u = 0;
  }
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  ASSERT(offset >= 0);
  emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1850
1851
// Load a single-precision VFP register from memory at Rbase + offset.
// As with the double form, a negative offset clears the U bit; the register
// number is split into its 4-bit field and D bit via split_code.
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    // Encode the magnitude; direction is carried by the U bit.
    offset = -offset;
    u = 0;
  }
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  ASSERT(offset >= 0);
  int sd, d;
  dst.split_code(&sd, &d);
  emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | ((offset / 4) & 255));
}
1874
1875
// Store a double-precision VFP register to memory at Rbase + offset.
// A negative offset is encoded by clearing the U (add) bit; the magnitude
// must be word-aligned and (offset/4) must fit in 8 bits.
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    // Encode the magnitude; direction is carried by the U bit.
    offset = -offset;
    u = 0;
  }
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  ASSERT(offset >= 0);
  emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
       0xB*B8 | ((offset / 4) & 255));
}
1896
1897
// Store a single-precision VFP register to memory at Rbase + offset.
// Mirrors the double-precision form; the register number is split into its
// 4-bit field and D bit via split_code.
void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int u = 1;
  if (offset < 0) {
    // Encode the magnitude; direction is carried by the U bit.
    offset = -offset;
    u = 0;
  }
  ASSERT(offset % 4 == 0);
  ASSERT((offset / 4) < 256);
  ASSERT(offset >= 0);
  int sd, d;
  src.split_code(&sd, &d);
  emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | ((offset / 4) & 255));
}
1920
1921
// Reinterpret the bit pattern of a double as two 32-bit words: *lo receives
// the low word of the IEEE-754 representation, *hi the high word (which
// holds the sign, exponent and upper mantissa bits).
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits = 0;
  memcpy(&bits, &d, sizeof(bits));
  *lo = static_cast<uint32_t>(bits & 0xffffffff);
  *hi = static_cast<uint32_t>(bits >> 32);
}
1929
// Returns true if 'd' can be represented as a VFP vmov immediate; on
// success stores the instruction-ready encoding (the 8-bit immediate split
// into its two 4-bit fields at bits 19-16 and 3-0) into *encoding.
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two
  // 4-bit fields. For an 8-bit immediate of the form:
  //
  //  [abcdefgh]
  //
  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
  // created of the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //      00000000,00000000,00000000,00000000]
  //
  // where B = ~b.
  //

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 62:55 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 63 must be NOT bit 62.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
1979
1980
// Move an immediate double value into VFP register dst. If the value fits
// the VFP modified-immediate form it is emitted as a single vmov;
// otherwise the two 32-bit halves are materialised via the ip scratch
// register (ip is clobbered in that path).
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Condition cond) {
  // Dd = immediate
  // Instruction details available in ARM DDI 0406B, A8-640.
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  uint32_t enc;
  if (FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
  } else {
    // Synthesise the double from ARM immediates. This could be implemented
    // using vldr from a constant pool.
    // NOTE(review): the mov(ip, ...) calls below (and the vmov in the
    // lo == hi branch) use their default condition rather than 'cond' —
    // confirm this is intended for conditional callers.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (lo == hi) {
      // If the lo and hi parts of the double are equal, the literal is easier
      // to create. This is the case with 0.0.
      mov(ip, Operand(lo));
      vmov(dst, ip, ip);
    } else {
      // Move the low part of the double into the lower of the corresponding S
      // registers of D register dst.
      mov(ip, Operand(lo));
      vmov(dst.low(), ip, cond);

      // Move the high part of the double into the higher of the corresponding S
      // registers of D register dst.
      mov(ip, Operand(hi));
      vmov(dst.high(), ip, cond);
    }
  }
}
2016
2017
// Register-to-register move between single-precision VFP registers.
// Both register numbers are split into their 4-bit field plus extension bit.
void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
2029
2030
// Register-to-register move between double-precision VFP registers.
void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xB*B20 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
2040
2041
// Move two ARM core registers into a double-precision VFP register:
// src1 supplies the low word, src2 the high word. Neither may be pc.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
2055
2056
// Move a double-precision VFP register into two ARM core registers:
// dst1 receives the low word, dst2 the high word. Neither may be pc.
// Same base encoding as the core-to-VFP form but with op (bit 20) set.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
2070
2071
// Move an ARM core register into a single-precision VFP register.
// src may not be pc.
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2085
2086
// Move a single-precision VFP register into an ARM core register.
// dst may not be pc. Same base encoding as the reverse form but with
// op (bit 20) set.
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2100
2101
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
// S32/U32: 32-bit signed/unsigned integer; F32/F64: single/double float.
enum VFPType { S32, U32, F32, F64 };
2105
2106
2107static bool IsSignedVFPType(VFPType type) {
2108 switch (type) {
2109 case S32:
2110 return true;
2111 case U32:
2112 return false;
2113 default:
2114 UNREACHABLE();
2115 return false;
2116 }
Steve Blockd0582a62009-12-15 09:54:21 +00002117}
2118
2119
Steve Block6ded16b2010-05-10 14:33:55 +01002120static bool IsIntegerVFPType(VFPType type) {
2121 switch (type) {
2122 case S32:
2123 case U32:
2124 return true;
2125 case F32:
2126 case F64:
2127 return false;
2128 default:
2129 UNREACHABLE();
2130 return false;
2131 }
2132}
2133
2134
2135static bool IsDoubleVFPType(VFPType type) {
2136 switch (type) {
2137 case F32:
2138 return false;
2139 case F64:
2140 return true;
2141 default:
2142 UNREACHABLE();
2143 return false;
2144 }
2145}
2146
2147
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002148// Split five bit reg_code based on size of reg_type.
2149// 32-bit register codes are Vm:M
2150// 64-bit register codes are M:Vm
2151// where Vm is four bits, and M is a single bit.
2152static void SplitRegCode(VFPType reg_type,
Steve Block6ded16b2010-05-10 14:33:55 +01002153 int reg_code,
2154 int* vm,
2155 int* m) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002156 ASSERT((reg_code >= 0) && (reg_code <= 31));
2157 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2158 // 32 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002159 *m = reg_code & 0x1;
2160 *vm = reg_code >> 1;
2161 } else {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002162 // 64 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002163 *m = (reg_code & 0x10) >> 4;
2164 *vm = reg_code & 0x0F;
2165 }
2166}
2167
2168
// Encode vcvt.src_type.dst_type instruction.
// Builds the full instruction word for a conversion between the given VFP
// types. 'mode' feeds the op bit (7) for float-to-integer conversions;
// for all other conversions it is unused.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        Assembler::ConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float-to-integer: opc2 selects signed/unsigned destination,
      // sz the source width, and op comes from the conversion mode.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // Integer-to-float: sz selects the destination width and op the
      // signedness of the integer source.
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
2213
2214
// vcvt.f64.s32: convert the signed 32-bit integer in src to a double in dst.
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
2222
2223
// vcvt.f32.s32: convert the signed 32-bit integer in src to a float in dst.
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
2231
2232
// vcvt.f64.u32: convert the unsigned 32-bit integer in src to a double in dst.
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
2240
2241
// vcvt.s32.f64: convert the double in src to a signed 32-bit integer in dst;
// 'mode' feeds the encoding's op bit for the conversion.
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
2249
2250
// vcvt.u32.f64: convert the double in src to an unsigned 32-bit integer in
// dst; 'mode' feeds the encoding's op bit for the conversion.
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
2258
2259
// vcvt.f64.f32: widen the single-precision value in src to a double in dst.
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
2267
2268
// vcvt.f32.f64: narrow the double in src to a single-precision value in dst.
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             ConversionMode mode,
                             const Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
2276
2277
// Double-precision floating point addition: dst = src1 + src2.
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2291
2292
// Double-precision floating point subtraction: dst = src1 - src2.
// Same base encoding as vadd but with bit 6 set.
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
2306
2307
// Double-precision floating point multiplication: dst = src1 * src2.
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2321
2322
// Double-precision floating point division: dst = src1 / src2.
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2336
2337
// Double-precision floating point compare of src1 against src2; the result
// lands in the FPSCR flags (read back with vmrs).
// Note: the SBit parameter 's' is not used in the emitted encoding.
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const SBit s,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
2350
2351
// Double-precision floating point compare of src1 against the immediate
// 0.0 (the only immediate the instruction supports; asserted below).
// Note: the SBit parameter 's' is not used in the emitted encoding.
void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const SBit s,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(src2 == 0.0);
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
       src1.code()*B12 | 0x5*B9 | B8 | B6);
}
2365
2366
// VMSR: transfer an ARM core register to the VFP system register (FPSCR).
// NOTE(review): for vmsr the core register is the transfer *source*; the
// parameter name 'dst' is kept for interface compatibility — confirm
// against callers.
void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
2375
2376
// VMRS: transfer the VFP system register (FPSCR) into ARM core register
// dst, e.g. to read the flags produced by vcmp.
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
2385
2386
// Double-precision floating point square root: dst = sqrt(src).
void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
       dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
2396
2397
Andrei Popescu31002712010-02-23 13:46:05 +00002398// Pseudo instructions.
// Emit a nop encoded as "mov rN, rN" where N == type, allowing distinct
// marker nops (types 0-14) to be recognised later via IsNop.
void Assembler::nop(int type) {
  // This is mov rx, rx.
  ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
  emit(al | 13*B21 | type*B12 | type);
}
2404
2405
// Returns true if 'instr' is exactly the marker nop of the given type,
// i.e. the same "mov rN, rN" encoding that nop(type) emits.
bool Assembler::IsNop(Instr instr, int type) {
  // Check for mov rx, rx.
  ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
  return instr == (al | 13*B21 | type*B12 | type);
}
2411
2412
Steve Blockd0582a62009-12-15 09:54:21 +00002413bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2414 uint32_t dummy1;
2415 uint32_t dummy2;
2416 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2417}
2418
2419
// Prevent the constant pool from being emitted within the next
// 'instructions' instructions, so a fixed-length instruction sequence
// stays contiguous.
void Assembler::BlockConstPoolFor(int instructions) {
  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
2423
2424
Andrei Popescu31002712010-02-23 13:46:05 +00002425// Debugging.
// Record a JS_RETURN relocation entry at the current pc, flushing any
// pending source positions first.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
2431
2432
// Record a DEBUG_BREAK_SLOT relocation entry at the current pc, flushing
// any pending source positions first.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
2438
2439
// Record a COMMENT relocation entry carrying the message pointer, but only
// when code comments are enabled via --code-comments.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
2446
2447
// Grow the code buffer: allocate a larger one (doubling up to 1MB, then
// growing by 1MB steps), copy the instructions (which grow up from the
// start) and the relocation info (which grows down from the end), then fix
// up all pointers and pending relocation entries for the move.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc; // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0); // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  // JS_RETURN entries are deliberately left untouched here.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
2497
2498
Ben Murdochb0fe1622011-05-05 13:52:32 +01002499void Assembler::db(uint8_t data) {
2500 CheckBuffer();
2501 *reinterpret_cast<uint8_t*>(pc_) = data;
2502 pc_ += sizeof(uint8_t);
2503}
2504
2505
2506void Assembler::dd(uint32_t data) {
2507 CheckBuffer();
2508 *reinterpret_cast<uint32_t*>(pc_) = data;
2509 pc_ += sizeof(uint32_t);
2510}
2511
2512
Steve Blocka7e24c12009-10-30 11:49:00 +00002513void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2514 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002515 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
Andrei Popescu31002712010-02-23 13:46:05 +00002516 // Adjust code for new modes.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002517 ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2518 || RelocInfo::IsJSReturn(rmode)
Steve Blocka7e24c12009-10-30 11:49:00 +00002519 || RelocInfo::IsComment(rmode)
2520 || RelocInfo::IsPosition(rmode));
Andrei Popescu31002712010-02-23 13:46:05 +00002521 // These modes do not need an entry in the constant pool.
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 } else {
2523 ASSERT(num_prinfo_ < kMaxNumPRInfo);
2524 prinfo_[num_prinfo_++] = rinfo;
2525 // Make sure the constant pool is not emitted in place of the next
Andrei Popescu31002712010-02-23 13:46:05 +00002526 // instruction for which we just recorded relocation info.
Steve Blocka7e24c12009-10-30 11:49:00 +00002527 BlockConstPoolBefore(pc_offset() + kInstrSize);
2528 }
2529 if (rinfo.rmode() != RelocInfo::NONE) {
2530 // Don't record external references unless the heap will be serialized.
Steve Blockd0582a62009-12-15 09:54:21 +00002531 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2532#ifdef DEBUG
2533 if (!Serializer::enabled()) {
2534 Serializer::TooLateToEnableNow();
2535 }
2536#endif
2537 if (!Serializer::enabled() && !FLAG_debug_code) {
2538 return;
2539 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002540 }
2541 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2542 reloc_info_writer.Write(&rinfo);
2543 }
2544}
2545
2546
// Emits the pending constant pool entries (prinfo_) into the instruction
// stream when it is time to do so, patching each recorded ldr/str so its
// pc-relative offset points at the emitted pool slot. If emission is not
// yet needed (or is currently blocked), only reschedules the next check.
//
// force_emit   - the pool must be emitted now; it is an error (ASSERT) if
//                emission is blocked at the same time.
// require_jump - execution can fall through to this point, so a branch over
//                the pool data must be emitted first.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
  // both checked here. Also, recursive calls to CheckConstPool are blocked by
  // no_const_pool_before_.
  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (const_pool_blocked_nesting_ > 0) {
      // Nested blocking has no known end offset; re-check every instruction.
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      // Blocked only up to a known offset; re-check exactly there.
      next_buffer_check_ = no_const_pool_before_;
    }

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A3.1 Instruction set encoding. The low bits carry the entry count.
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries and patch each recorded instruction to
  // address its pool slot.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // On ARM, pc reads as the instruction's address plus 8, hence the bias.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      // Negative offsets are encoded as a positive offset12 with U cleared.
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    // Write back the patched instruction, then emit the constant itself.
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
2653
2654
2655} } // namespace v8::internal
Leon Clarkef7060e22010-06-03 12:02:55 +01002656
2657#endif // V8_TARGET_ARCH_ARM