blob: ec28da400255ddac296032c684ab17e8b87b25ff [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
Leon Clarked91b9f72010-01-27 17:25:45 +000033// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
Ben Murdoch8b112d22011-06-08 16:22:53 +010035// Copyright 2011 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +000036
37#include "v8.h"
38
Leon Clarkef7060e22010-06-03 12:02:55 +010039#if defined(V8_TARGET_ARCH_ARM)
40
Steve Blocka7e24c12009-10-30 11:49:00 +000041#include "arm/assembler-arm-inl.h"
42#include "serialize.h"
43
44namespace v8 {
45namespace internal {
46
#ifdef DEBUG
// Set by Probe(); lets debug builds assert features are probed before use.
bool CpuFeatures::initialized_ = false;
#endif
// Bit set (indexed by the CpuFeature enum) of features usable for codegen.
unsigned CpuFeatures::supported_ = 0;
// Subset of supported_ discovered by runtime probing, as opposed to being
// implied by the platform or the compiler settings.
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
52
Andrei Popescu402d9372010-02-26 13:31:12 +000053
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP_INSTRUCTIONS
  // VFPv3 implies ARMv7, so both bits are set together.
  answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // def CAN_USE_VFP_INSTRUCTIONS

#ifdef __arm__
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots. ARMv7 and hardware floating
  // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
    && !defined(__SOFTFP__)
  answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
        // && !defined(__SOFTFP__)
#endif  // def __arm__

  return answer;
}
Andrei Popescu402d9372010-02-26 13:31:12 +000080
81
// Determine the set of CPU features available for code generation. Combines
// platform/compiler-implied features with (when not serializing) runtime or
// flag-controlled probing.
void CpuFeatures::Probe() {
  // Features guaranteed by the platform and compiler flags. A repeated call
  // must agree with the first one.
  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                CpuFeaturesImpliedByCompiler());
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    return;
  }

#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  // Probe for additional features not already known to be available.
  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif
}
126
127
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// No relocation modes need post-move fixups on ARM (mask of modes is empty).
const int RelocInfo::kApplyMask = 0;
133
Leon Clarkef7060e22010-06-03 12:02:55 +0100134bool RelocInfo::IsCodedSpecially() {
135 // The deserializer needs to know whether a pointer is specially coded. Being
136 // specially coded on ARM means that it is a movw/movt instruction. We don't
137 // generate those yet.
138 return false;
139}
140
141
Steve Blocka7e24c12009-10-30 11:49:00 +0000142void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
143 // Patch the code at the current address with the supplied instructions.
144 Instr* pc = reinterpret_cast<Instr*>(pc_);
145 Instr* instr = reinterpret_cast<Instr*>(instructions);
146 for (int i = 0; i < instruction_count; i++) {
147 *(pc + i) = *(instr + i);
148 }
149
150 // Indicate that code has changed.
151 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
152}
153
154
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not needed on ARM so far; aborts at runtime if it is ever reached.
  UNIMPLEMENTED();
}
161
162
163// -----------------------------------------------------------------------------
164// Implementation of Operand and MemOperand
165// See assembler-arm-inl.h for inlined constructors
166
167Operand::Operand(Handle<Object> handle) {
168 rm_ = no_reg;
169 // Verify all Objects referred by code are NOT in new space.
170 Object* obj = *handle;
Steve Block44f0eee2011-05-26 01:26:41 +0100171 ASSERT(!HEAP->InNewSpace(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +0000172 if (obj->IsHeapObject()) {
173 imm32_ = reinterpret_cast<intptr_t>(handle.location());
174 rmode_ = RelocInfo::EMBEDDED_OBJECT;
175 } else {
176 // no relocation needed
177 imm32_ = reinterpret_cast<intptr_t>(obj);
178 rmode_ = RelocInfo::NONE;
179 }
180}
181
182
183Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
184 ASSERT(is_uint5(shift_imm));
185 ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
186 rm_ = rm;
187 rs_ = no_reg;
188 shift_op_ = shift_op;
189 shift_imm_ = shift_imm & 31;
190 if (shift_op == RRX) {
191 // encoded as ROR with shift_imm == 0
192 ASSERT(shift_imm == 0);
193 shift_op_ = ROR;
194 shift_imm_ = 0;
195 }
196}
197
198
199Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
200 ASSERT(shift_op != RRX);
201 rm_ = rm;
202 rs_ = no_reg;
203 shift_op_ = shift_op;
204 rs_ = rs;
205}
206
207
208MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
209 rn_ = rn;
210 rm_ = no_reg;
211 offset_ = offset;
212 am_ = am;
213}
214
215MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
216 rn_ = rn;
217 rm_ = rm;
218 shift_op_ = LSL;
219 shift_imm_ = 0;
220 am_ = am;
221}
222
223
224MemOperand::MemOperand(Register rn, Register rm,
225 ShiftOp shift_op, int shift_imm, AddrMode am) {
226 ASSERT(is_uint5(shift_imm));
227 rn_ = rn;
228 rm_ = rm;
229 shift_op_ = shift_op;
230 shift_imm_ = shift_imm & 31;
231 am_ = am;
232}
233
234
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
        kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
// Masks, patterns and flip bits used by fits_shifter() to rewrite an
// instruction into its complementary form (mov<->mvn, cmp<->cmn,
// add<->sub, and<->bic) when the complemented immediate fits.
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
// Patterns for ldr/str with an fp-relative offset (positive and negative).
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;


// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
Steve Blocka7e24c12009-10-30 11:49:00 +0000291
Steve Block1e0659c2011-05-24 12:43:12 +0100292
// Construct an assembler writing into `buffer` of `buffer_size` bytes, or
// into an internally managed buffer when `buffer` is NULL. Internally managed
// buffers may be recycled through the isolate's spare-buffer cache.
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the isolate's cached spare buffer if one is available.
      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Set up buffer pointers. Instructions grow forward from the start;
  // relocation info is written backward from the end of the buffer.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_pending_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_use_ = -1;
  last_bound_pos_ = 0;
  ClearRecordedAstId();
}
335
336
337Assembler::~Assembler() {
Steve Block6ded16b2010-05-10 14:33:55 +0100338 ASSERT(const_pool_blocked_nesting_ == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +0000339 if (own_buffer_) {
Steve Block44f0eee2011-05-26 01:26:41 +0100340 if (isolate()->assembler_spare_buffer() == NULL &&
341 buffer_size_ == kMinimalBufferSize) {
342 isolate()->set_assembler_spare_buffer(buffer_);
Steve Blocka7e24c12009-10-30 11:49:00 +0000343 } else {
344 DeleteArray(buffer_);
345 }
346 }
347}
348
349
// Finalize generated code and fill in the code descriptor.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_pending_reloc_info_ == 0);

  // Set up code descriptor. Reloc info is written backward from the end of
  // the buffer, so its size is the gap between the buffer end and the
  // reloc writer's current position.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
361
362
363void Assembler::Align(int m) {
364 ASSERT(m >= 4 && IsPowerOf2(m));
365 while ((pc_offset() & (m - 1)) != 0) {
366 nop();
367 }
368}
369
370
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100371void Assembler::CodeTargetAlign() {
372 // Preferred alignment of jump targets on some ARM chips.
373 Align(8);
374}
375
376
Steve Block1e0659c2011-05-24 12:43:12 +0100377Condition Assembler::GetCondition(Instr instr) {
378 return Instruction::ConditionField(instr);
379}
380
381
Steve Block6ded16b2010-05-10 14:33:55 +0100382bool Assembler::IsBranch(Instr instr) {
383 return (instr & (B27 | B25)) == (B27 | B25);
384}
385
386
387int Assembler::GetBranchOffset(Instr instr) {
388 ASSERT(IsBranch(instr));
389 // Take the jump offset in the lower 24 bits, sign extend it and multiply it
390 // with 4 to get the offset in bytes.
Steve Block1e0659c2011-05-24 12:43:12 +0100391 return ((instr & kImm24Mask) << 8) >> 6;
Steve Block6ded16b2010-05-10 14:33:55 +0100392}
393
394
395bool Assembler::IsLdrRegisterImmediate(Instr instr) {
396 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
397}
398
399
400int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
401 ASSERT(IsLdrRegisterImmediate(instr));
402 bool positive = (instr & B23) == B23;
Steve Block1e0659c2011-05-24 12:43:12 +0100403 int offset = instr & kOff12Mask; // Zero extended offset.
Steve Block6ded16b2010-05-10 14:33:55 +0100404 return positive ? offset : -offset;
405}
406
407
408Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
409 ASSERT(IsLdrRegisterImmediate(instr));
410 bool positive = offset >= 0;
411 if (!positive) offset = -offset;
412 ASSERT(is_uint12(offset));
413 // Set bit indicating whether the offset should be added.
414 instr = (instr & ~B23) | (positive ? B23 : 0);
415 // Set the actual offset.
Steve Block1e0659c2011-05-24 12:43:12 +0100416 return (instr & ~kOff12Mask) | offset;
Steve Block6ded16b2010-05-10 14:33:55 +0100417}
418
419
Kristian Monsen50ef84f2010-07-29 15:18:00 +0100420bool Assembler::IsStrRegisterImmediate(Instr instr) {
421 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
422}
423
424
425Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
426 ASSERT(IsStrRegisterImmediate(instr));
427 bool positive = offset >= 0;
428 if (!positive) offset = -offset;
429 ASSERT(is_uint12(offset));
430 // Set bit indicating whether the offset should be added.
431 instr = (instr & ~B23) | (positive ? B23 : 0);
432 // Set the actual offset.
Steve Block1e0659c2011-05-24 12:43:12 +0100433 return (instr & ~kOff12Mask) | offset;
Kristian Monsen50ef84f2010-07-29 15:18:00 +0100434}
435
436
437bool Assembler::IsAddRegisterImmediate(Instr instr) {
438 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
439}
440
441
442Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
443 ASSERT(IsAddRegisterImmediate(instr));
444 ASSERT(offset >= 0);
445 ASSERT(is_uint12(offset));
446 // Set the offset.
Steve Block1e0659c2011-05-24 12:43:12 +0100447 return (instr & ~kOff12Mask) | offset;
Kristian Monsen50ef84f2010-07-29 15:18:00 +0100448}
449
450
Leon Clarkef7060e22010-06-03 12:02:55 +0100451Register Assembler::GetRd(Instr instr) {
452 Register reg;
Steve Block1e0659c2011-05-24 12:43:12 +0100453 reg.code_ = Instruction::RdValue(instr);
454 return reg;
455}
456
457
458Register Assembler::GetRn(Instr instr) {
459 Register reg;
460 reg.code_ = Instruction::RnValue(instr);
461 return reg;
462}
463
464
465Register Assembler::GetRm(Instr instr) {
466 Register reg;
467 reg.code_ = Instruction::RmValue(instr);
Leon Clarkef7060e22010-06-03 12:02:55 +0100468 return reg;
469}
470
471
472bool Assembler::IsPush(Instr instr) {
473 return ((instr & ~kRdMask) == kPushRegPattern);
474}
475
476
477bool Assembler::IsPop(Instr instr) {
478 return ((instr & ~kRdMask) == kPopRegPattern);
479}
480
481
482bool Assembler::IsStrRegFpOffset(Instr instr) {
483 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
484}
485
486
487bool Assembler::IsLdrRegFpOffset(Instr instr) {
488 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
489}
490
491
492bool Assembler::IsStrRegFpNegOffset(Instr instr) {
493 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
494}
495
496
497bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
498 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
499}
500
501
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800502bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
503 // Check the instruction is indeed a
504 // ldr<cond> <Rd>, [pc +/- offset_12].
Steve Block1e0659c2011-05-24 12:43:12 +0100505 return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800506}
507
508
Steve Block1e0659c2011-05-24 12:43:12 +0100509bool Assembler::IsTstImmediate(Instr instr) {
510 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
511 (I | TST | S);
512}
513
514
515bool Assembler::IsCmpRegister(Instr instr) {
516 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
517 (CMP | S);
518}
519
520
521bool Assembler::IsCmpImmediate(Instr instr) {
522 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
523 (I | CMP | S);
524}
525
526
527Register Assembler::GetCmpImmediateRegister(Instr instr) {
528 ASSERT(IsCmpImmediate(instr));
529 return GetRn(instr);
530}
531
532
533int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
534 ASSERT(IsCmpImmediate(instr));
535 return instr & kOff12Mask;
536}
537
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
551
552
// Return the target position encoded at buffer offset pos: either the next
// link in a label chain or an emitted label constant.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign extend the 24-bit offset (shift to the sign bit, arithmetic shift
  // back) and convert from words to bytes (net right shift of 6).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
568
569
// Patch the instruction (or label constant) at buffer offset pos to refer to
// target_pos.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    // Plain b/bl: offsets must be word aligned; keep only cond/opcode bits.
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
593
594
// Debug helper: print the state of label L (unused, bound, or each linked
// branch site in its chain) to stdout.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // walk a copy so the caller's label is left untouched
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        // Emitted label constant, not a branch.
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;   // mnemonic
        const char* c;   // condition suffix
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
651
652
653void Assembler::bind_to(Label* L, int pos) {
654 ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
655 while (L->is_linked()) {
656 int fixup_pos = L->pos();
657 next(L); // call next before overwriting link with target at fixup_pos
658 target_at_put(fixup_pos, pos);
659 }
660 L->bind_to(pos);
661
662 // Keep track of the last bound label so we don't eliminate any instructions
663 // before a bound label.
664 if (pos > last_bound_pos_)
665 last_bound_pos_ = pos;
666}
667
668
// Merge appendix's link chain into L's chain and invalidate appendix.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list: walk to the terminating entry of L's
      // chain...
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      // ...and patch it to continue into appendix's chain.
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
688
689
690void Assembler::bind(Label* L) {
691 ASSERT(!L->is_bound()); // label can only be bound once
692 bind_to(L, pc_offset());
693}
694
695
696void Assembler::next(Label* L) {
697 ASSERT(L->is_linked());
698 int link = target_at(L->pos());
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000699 if (link == kEndOfChain) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000700 L->Unuse();
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000701 } else {
702 ASSERT(link >= 0);
703 L->link_to(link);
Steve Blocka7e24c12009-10-30 11:49:00 +0000704 }
705}
706
707
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100708static Instr EncodeMovwImmediate(uint32_t immediate) {
709 ASSERT(immediate < 0x10000);
710 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
711}
712
713
Andrei Popescu31002712010-02-23 13:46:05 +0000714// Low-level code emission routines depending on the addressing mode.
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100715// If this returns true then you have to use the rotate_imm and immed_8
716// that it returns, because it may have already changed the instruction
717// to match them!
Steve Blocka7e24c12009-10-30 11:49:00 +0000718static bool fits_shifter(uint32_t imm32,
719 uint32_t* rotate_imm,
720 uint32_t* immed_8,
721 Instr* instr) {
Andrei Popescu31002712010-02-23 13:46:05 +0000722 // imm32 must be unsigned.
Steve Blocka7e24c12009-10-30 11:49:00 +0000723 for (int rot = 0; rot < 16; rot++) {
724 uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
725 if ((imm8 <= 0xff)) {
726 *rotate_imm = rot;
727 *immed_8 = imm8;
728 return true;
729 }
730 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100731 // If the opcode is one with a complementary version and the complementary
732 // immediate fits, change the opcode.
733 if (instr != NULL) {
734 if ((*instr & kMovMvnMask) == kMovMvnPattern) {
735 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
736 *instr ^= kMovMvnFlip;
737 return true;
738 } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
Ben Murdoch8b112d22011-06-08 16:22:53 +0100739 if (CpuFeatures::IsSupported(ARMv7)) {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100740 if (imm32 < 0x10000) {
741 *instr ^= kMovwLeaveCCFlip;
742 *instr |= EncodeMovwImmediate(imm32);
743 *rotate_imm = *immed_8 = 0; // Not used for movw.
744 return true;
745 }
746 }
747 }
748 } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
749 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
750 *instr ^= kCmpCmnFlip;
751 return true;
752 }
753 } else {
754 Instr alu_insn = (*instr & kALUMask);
Steve Block1e0659c2011-05-24 12:43:12 +0100755 if (alu_insn == ADD ||
756 alu_insn == SUB) {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100757 if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
758 *instr ^= kAddSubFlip;
759 return true;
760 }
Steve Block1e0659c2011-05-24 12:43:12 +0100761 } else if (alu_insn == AND ||
762 alu_insn == BIC) {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100763 if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
764 *instr ^= kAndBicFlip;
765 return true;
766 }
767 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000768 }
769 }
770 return false;
771}
772
773
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_use_constant_pool() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    // Once an external reference has been seen, enabling serialization
    // later would be inconsistent; flag that in debug builds.
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    // External references need the constant pool only when serializing.
    return Serializer::enabled();
  } else if (rmode_ == RelocInfo::NONE) {
    return false;
  }
  return true;
}
791
792
Steve Block44f0eee2011-05-26 01:26:41 +0100793bool Operand::is_single_instruction(Instr instr) const {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100794 if (rm_.is_valid()) return true;
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100795 uint32_t dummy1, dummy2;
Steve Block44f0eee2011-05-26 01:26:41 +0100796 if (must_use_constant_pool() ||
797 !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
798 // The immediate operand cannot be encoded as a shifter operand, or use of
799 // constant pool is required. For a mov instruction not setting the
800 // condition code additional instruction conventions can be used.
801 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
802 if (must_use_constant_pool() ||
Ben Murdoch8b112d22011-06-08 16:22:53 +0100803 !CpuFeatures::IsSupported(ARMv7)) {
Steve Block44f0eee2011-05-26 01:26:41 +0100804 // mov instruction will be an ldr from constant pool (one instruction).
805 return true;
806 } else {
807 // mov instruction will be a mov or movw followed by movt (two
808 // instructions).
809 return false;
810 }
811 } else {
812 // If this is not a mov or mvn instruction there will always an additional
813 // instructions - either mov or ldr. The mov might actually be two
814 // instructions mov or movw followed by movt so including the actual
815 // instruction two or three instructions will be generated.
816 return false;
817 }
818 } else {
819 // No use of constant pool and the immediate operand can be encoded as a
820 // shifter operand.
821 return true;
822 }
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100823}
824
825
// Encode and emit a data-processing (addressing mode 1) instruction.
// |instr| carries the condition, opcode and S bit; rn/rd are the register
// fields and |x| the shifter operand. Immediates that cannot be encoded
// inline are loaded through ip, or rewritten as ldr-from-pool / movw+movt
// when the instruction is a plain mov.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_use_constant_pool() ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        if (x.must_use_constant_pool() ||
            !CpuFeatures::IsSupported(ARMv7)) {
          // Materialize through the constant pool with a pc-relative load.
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(rd, MemOperand(pc, 0), cond);
        } else {
          // Will probably use movw, will certainly not use constant pool.
          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
        }
      } else {
        // If this is not a mov or mvn instruction we may still be able to avoid
        // a constant pool entry by using mvn or movw.
        if (!x.must_use_constant_pool() &&
            (instr & kMovMvnMask) != kMovMvnPattern) {
          mov(ip, x, LeaveCC, cond);
        } else {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(ip, MemOperand(pc, 0), cond);
        }
        // Retry with the immediate now held in ip.
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
883
884
// Encode and emit a load/store word or byte (addressing mode 2) instruction.
// Immediate offsets outside the unsigned 12-bit range are first moved into
// ip and the access retried with a register offset.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets are encoded as positive with the U bit cleared.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
915
916
// Encode and emit a halfword/signed-byte/doubleword (addressing mode 3)
// load/store. Offsets that do not fit the split 8-bit immediate field, and
// scaled register offsets (unsupported in this mode), are first moved into
// ip and the access retried.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Negative offsets are encoded as positive with the U bit cleared.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split across two nibbles (bits 8-11 and 0-3).
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
954
955
956void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
Steve Block1e0659c2011-05-24 12:43:12 +0100957 ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
Steve Blocka7e24c12009-10-30 11:49:00 +0000958 ASSERT(rl != 0);
959 ASSERT(!rn.is(pc));
960 emit(instr | rn.code()*B16 | rl);
961}
962
963
// Encode and emit a coprocessor load/store (addressing mode 5). The byte
// offset must be word-aligned and fit in 8 bits after scaling to words.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;  // the instruction encodes a word count, not bytes
  if (offset_8 < 0) {
    // Negative offsets are encoded as positive with the U bit cleared.
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
987
988
989int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
990 int target_pos;
991 if (L->is_bound()) {
992 target_pos = L->pos();
993 } else {
994 if (L->is_linked()) {
995 target_pos = L->pos(); // L's link
996 } else {
997 target_pos = kEndOfChain;
998 }
999 L->link_to(pc_offset());
1000 }
1001
1002 // Block the emission of the constant pool, since the branch instruction must
Andrei Popescu31002712010-02-23 13:46:05 +00001003 // be emitted at the pc offset recorded by the label.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001004 BlockConstPoolFor(1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001005 return target_pos - (pc_offset() + kPcLoadDelta);
1006}
1007
1008
// Stores label link information in the instruction slot at at_offset and
// threads the label onto that slot.
// NOTE(review): when L is already bound, target_pos is computed but nothing
// is written -- confirm callers only use this with unbound labels.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    // Store the chain position biased by the code-header offset.
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}
1023
1024
Andrei Popescu31002712010-02-23 13:46:05 +00001025// Branch instructions.
// Emit a branch (B) to the given byte offset under condition cond.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);  // branch targets are word-aligned
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}
1037
1038
// Emit a branch-with-link (BL) to the given byte offset.
void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();  // calls flush positions
  ASSERT((branch_offset & 3) == 0);  // branch targets are word-aligned
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
1046
1047
// Emit an immediate BLX (branch with link and exchange); ARMv5+.
void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);  // only halfword alignment is required
  int h = ((branch_offset & 2) >> 1)*B24;  // H bit carries the halfword offset
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
1056
1057
// Emit a register BLX: branch with link to the address in target; ARMv5+.
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
1063
1064
// Emit a BX: branch (and exchange) to the address in target; ARMv5+/v4t.
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
1070
1071
Andrei Popescu31002712010-02-23 13:46:05 +00001072// Data-processing instructions.
1073
// dst = src1 & src2 (bitwise AND).
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}
1078
1079
// dst = src1 ^ src2 (bitwise exclusive OR).
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}
1084
1085
// dst = src1 - src2.
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}
1090
1091
// dst = src2 - src1 (reverse subtract).
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}
1096
1097
// dst = src1 + src2.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);
}
1102
1103
// dst = src1 + src2 + carry (add with carry).
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}
1108
1109
// dst = src1 - src2 - !carry (subtract with carry).
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}
1114
1115
// dst = src2 - src1 - !carry (reverse subtract with carry).
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}
1120
1121
// Set condition flags on src1 & src2; no destination register.
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}
1125
1126
// Set condition flags on src1 ^ src2; no destination register.
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}
1130
1131
// Set condition flags on src1 - src2 (compare).
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}
1135
1136
// Compare src against a raw 12-bit immediate field, bypassing the usual
// shifter-operand encoding in addrmod1.
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
1142
1143
// Set condition flags on src1 + src2 (compare negative).
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}
1147
1148
// dst = src1 | src2 (bitwise OR).
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}
1153
1154
// dst = src. A mov into pc acts as a branch, so recorded source positions
// are flushed first.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
1165
1166
// Load a 16-bit immediate into reg (upper halfword zeroed). Routed through
// mov(), which selects the concrete encoding -- movw on ARMv7, otherwise a
// shifter immediate or constant-pool load (see fits_shifter/addrmod1).
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  mov(reg, Operand(immediate), LeaveCC, cond);
}
1171
1172
// movt: set the high halfword of reg from the low 16 bits of immediate,
// leaving the low halfword of reg unchanged.
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1176
1177
// dst = src1 & ~src2 (bit clear).
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}
1182
1183
// dst = ~src (move negated).
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}
1187
1188
Andrei Popescu31002712010-02-23 13:46:05 +00001189// Multiply instructions.
// dst = src1 * src2 + srcA (multiply-accumulate, low 32 bits).
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1196
1197
// dst = src1 * src2 (low 32 bits).
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1204
1205
// Signed multiply-accumulate long: dstH:dstL += src1 * src2.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // the two halves must be distinct registers
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1217
1218
// Signed multiply long: dstH:dstL = src1 * src2.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // the two halves must be distinct registers
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1230
1231
// Unsigned multiply-accumulate long: dstH:dstL += src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // the two halves must be distinct registers
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1243
1244
// Unsigned multiply long: dstH:dstL = src1 * src2.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // the two halves must be distinct registers
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1256
1257
Andrei Popescu31002712010-02-23 13:46:05 +00001258// Miscellaneous arithmetic instructions.
// dst = number of leading zero bits in src (CLZ); ARMv5+.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}
1265
1266
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001267// Saturating instructions.
1268
1269// Unsigned saturate.
// usat dst, #satpos, src: unsigned-saturate the (optionally shifted) source
// into the range [0, 2^satpos - 1].
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));  // only encodable shifts
  ASSERT(src.rs_.is(no_reg));  // register-shifted operands cannot be encoded

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;  // sh bit: 1 selects ASR, 0 selects LSL
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
1289
1290
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001291// Bitfield manipulation instructions.
1292
1293// Unsigned bit field extract.
1294// Extracts #width adjacent bits from position #lsb in a register, and
1295// writes them to the low bits of a destination register.
1296// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The field width is encoded as (width - 1); lsb sits in bits 7-11.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1310
1311
1312// Signed bit field extract.
1313// Extracts #width adjacent bits from position #lsb in a register, and
1314// writes them to the low bits of a destination register. The extracted
1315// value is sign extended to fill the destination register.
1316// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The field width is encoded as (width - 1); lsb sits in bits 7-11.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1330
1331
1332// Bit field clear.
1333// Sets #width adjacent bits at position #lsb in the destination register
1334// to zero, preserving the value of the other bits.
1335// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The field is encoded by its msb and lsb bit positions.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
1345
1346
1347// Bit field insert.
1348// Inserts #width adjacent bits from the low bits of the source register
1349// into position #lsb of the destination register.
1350// bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The field is encoded by its msb and lsb bit positions.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
1365
1366
Andrei Popescu31002712010-02-23 13:46:05 +00001367// Status register access instructions.
// Move the selected status register s into dst.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
1372
1373
// Move to status register: writes the selected status-register fields from
// an immediate or register operand. Immediates that cannot be encoded as a
// shifter operand are loaded into ip from the constant pool first.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_use_constant_pool() ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1397
1398
Andrei Popescu31002712010-02-23 13:46:05 +00001399// Load/Store instructions.
// Load a word from src into dst; a load into pc acts as a branch, so
// recorded source positions are flushed first.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);
}
1406
1407
// Store the word in src to memory at dst.
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
}
1411
1412
// Load a byte into dst (zero-extended).
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1416
1417
// Store the low byte of src.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1421
1422
// Load a halfword into dst (zero-extended).
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1426
1427
// Store the low halfword of src.
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1431
1432
// Load a byte into dst and sign-extend it.
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1436
1437
// Load a halfword into dst and sign-extend it.
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1441
1442
// Load a doubleword into the consecutive register pair dst1 (even) / dst2
// (odd); requires ARMv7 and an immediate-offset MemOperand.
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));  // register offsets are not supported
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);  // the first register must be even-numbered
  ASSERT_EQ(dst1.code() + 1, dst2.code());  // registers must be consecutive
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1452
1453
Leon Clarkef7060e22010-06-03 12:02:55 +01001454void Assembler::strd(Register src1, Register src2,
1455 const MemOperand& dst, Condition cond) {
Kristian Monsen25f61362010-05-21 11:50:48 +01001456 ASSERT(dst.rm().is(no_reg));
Leon Clarkef7060e22010-06-03 12:02:55 +01001457 ASSERT(!src1.is(lr)); // r14.
1458 ASSERT_EQ(0, src1.code() % 2);
1459 ASSERT_EQ(src1.code() + 1, src2.code());
Ben Murdoch8b112d22011-06-08 16:22:53 +01001460 ASSERT(CpuFeatures::IsEnabled(ARMv7));
Leon Clarkef7060e22010-06-03 12:02:55 +01001461 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
Kristian Monsen25f61362010-05-21 11:50:48 +01001462}
1463
Andrei Popescu31002712010-02-23 13:46:05 +00001464// Load/Store multiple instructions.
// Load multiple registers (ldm) from consecutive words at base, using
// block-addressing mode am.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1484
1485
// Store multiple registers (stm) to consecutive words at base, using
// block-addressing mode am.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1492
1493
Andrei Popescu31002712010-02-23 13:46:05 +00001494// Exception-generating instructions and debugging support.
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001495// Stops with a non-negative code less than kNumOfWatchedStops support
1496// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  // Simulator build: encode the stop as an svc carrying a (possibly
  // watched) stop code, followed by the message address.
  ASSERT(code >= kDefaultStopCode);
  {
    // The Simulator will handle the stop instruction and get the message
    // address. It expects to find the address just after the svc instruction.
    BlockConstPoolScope block_const_pool(this);
    if (code >= 0) {
      svc(kStopCode + code, cond);
    } else {
      // Negative codes collapse onto the maximum stop code.
      svc(kStopCode + kMaxStopCode, cond);
    }
    emit(reinterpret_cast<Instr>(msg));
  }
#else  // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  // On hardware, stop via a breakpoint. bkpt is emitted unconditionally
  // (see bkpt below), so a conditional stop must branch around it.
  if (cond != al) {
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
  svc(0x9f0001, cond);
#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif  // def __arm__
}
1526
1527
1528void Assembler::bkpt(uint32_t imm16) { // v5 and above
1529 ASSERT(is_uint16(imm16));
Steve Block1e0659c2011-05-24 12:43:12 +01001530 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
Steve Blocka7e24c12009-10-30 11:49:00 +00001531}
1532
1533
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001534void Assembler::svc(uint32_t imm24, Condition cond) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001535 ASSERT(is_uint24(imm24));
1536 emit(cond | 15*B24 | imm24);
1537}
1538
1539
Andrei Popescu31002712010-02-23 13:46:05 +00001540// Coprocessor instructions.
// Coprocessor data processing: issue operation (opcode_1, opcode_2) on
// coprocessor coproc with destination crd and operands crn, crm.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  // cond | 1110 | opcode_1(23-20) | CRn(19-16) | CRd(15-12) |
  // coproc(11-8) | opcode_2(7-5) | 0(4) | CRm(3-0)
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
1552
1553
1554void Assembler::cdp2(Coprocessor coproc,
1555 int opcode_1,
1556 CRegister crd,
1557 CRegister crn,
1558 CRegister crm,
1559 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01001560 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00001561}
1562
1563
// Move from ARM core register rd to coprocessor register crn (MCR).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  // cond | 1110 | opcode_1(23-21) | 0=store(20) | CRn(19-16) | Rd(15-12) |
  // coproc(11-8) | opcode_2(7-5) | 1(4) | CRm(3-0)
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1575
1576
1577void Assembler::mcr2(Coprocessor coproc,
1578 int opcode_1,
1579 Register rd,
1580 CRegister crn,
1581 CRegister crm,
1582 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01001583 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00001584}
1585
1586
// Move from coprocessor register crn to ARM core register rd (MRC).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  // Same encoding as mcr but with the L (load, bit 20) set.
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1598
1599
1600void Assembler::mrc2(Coprocessor coproc,
1601 int opcode_1,
1602 Register rd,
1603 CRegister crn,
1604 CRegister crm,
1605 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01001606 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00001607}
1608
1609
1610void Assembler::ldc(Coprocessor coproc,
1611 CRegister crd,
1612 const MemOperand& src,
1613 LFlag l,
1614 Condition cond) {
1615 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
1616}
1617
1618
// Load coprocessor register(s) with unindexed addressing: base register rn
// plus a coprocessor-interpreted 8-bit option field.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1630
1631
1632void Assembler::ldc2(Coprocessor coproc,
1633 CRegister crd,
1634 const MemOperand& src,
1635 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01001636 ldc(coproc, crd, src, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00001637}
1638
1639
1640void Assembler::ldc2(Coprocessor coproc,
1641 CRegister crd,
1642 Register rn,
1643 int option,
1644 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01001645 ldc(coproc, crd, rn, option, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00001646}
1647
1648
Steve Blockd0582a62009-12-15 09:54:21 +00001649// Support for VFP.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001650
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // The immediate form only encodes the magnitude; U (bit 23) selects add/sub.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }

  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Word-aligned offset that fits in the 8-bit imm field (imm is offset/4).
    emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Load from [ip] with a zero immediate offset.
    emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
  }
}
1682
1683
1684void Assembler::vldr(const DwVfpRegister dst,
1685 const MemOperand& operand,
1686 const Condition cond) {
1687 ASSERT(!operand.rm().is_valid());
1688 ASSERT(operand.am_ == Offset);
1689 vldr(dst, operand.rn(), operand.offset(), cond);
Leon Clarked91b9f72010-01-27 17:25:45 +00001690}
1691
1692
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // The immediate form only encodes the magnitude; U (bit 23) selects add/sub.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  // Single-precision register code splits into a 4-bit Vd and the D bit.
  int sd, d;
  dst.split_code(&sd, &d);
  ASSERT(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Word-aligned offset that fits in the 8-bit imm field (imm is offset/4).
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Load from [ip] with a zero immediate offset.
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
1726
1727
1728void Assembler::vldr(const SwVfpRegister dst,
1729 const MemOperand& operand,
1730 const Condition cond) {
1731 ASSERT(!operand.rm().is_valid());
1732 ASSERT(operand.am_ == Offset);
1733 vldr(dst, operand.rn(), operand.offset(), cond);
Steve Block6ded16b2010-05-10 14:33:55 +01001734}
1735
1736
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // The immediate form only encodes the magnitude; U (bit 23) selects add/sub.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Word-aligned offset that fits in the 8-bit imm field (imm is offset/4).
    emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Store to [ip] with a zero immediate offset.
    emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
  }
}
1767
1768
1769void Assembler::vstr(const DwVfpRegister src,
1770 const MemOperand& operand,
1771 const Condition cond) {
1772 ASSERT(!operand.rm().is_valid());
1773 ASSERT(operand.am_ == Offset);
1774 vstr(src, operand.rn(), operand.offset(), cond);
Leon Clarked91b9f72010-01-27 17:25:45 +00001775}
1776
1777
void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | (offset/4)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // The immediate form only encodes the magnitude; U (bit 23) selects add/sub.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  // Single-precision register code splits into a 4-bit Vd and the D bit.
  int sd, d;
  src.split_code(&sd, &d);
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Word-aligned offset that fits in the 8-bit imm field (imm is offset/4).
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Store to [ip] with a zero immediate offset.
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
1810
1811
1812void Assembler::vstr(const SwVfpRegister src,
1813 const MemOperand& operand,
1814 const Condition cond) {
1815 ASSERT(!operand.rm().is_valid());
1816 ASSERT(operand.am_ == Offset);
1817 vldr(src, operand.rn(), operand.offset(), cond);
Iain Merrick75681382010-08-19 15:07:18 +01001818}
1819
1820
// Load a contiguous run of double registers [first..last] from memory.
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  // (Opcode field is 1011 (0xB) for double-precision; the imm8 field counts
  // words, hence count * 2.)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
1840
1841
// Store a contiguous run of double registers [first..last] to memory.
void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  // (Same shape as vldm but with the L bit (B20) clear; imm8 counts words.)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
1861
// Load a contiguous run of single registers [first..last] from memory.
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | count
  // (Opcode field is 1010 (0xA) for single-precision; imm8 is the register
  // count directly — one word each.)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
1881
1882
// Store a contiguous run of single registers [first..last] to memory.
void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | count
  // (Opcode field is 1010 (0xA) for single-precision; imm8 is the register
  // count directly. L bit (B20) clear for store.)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT_LE(first.code(), last.code());
  ASSERT(am == ia || am == ia_w || am == db_w);
  ASSERT(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
1902
// Reinterpret the 64 bits of an IEEE-754 double as its low and high 32-bit
// halves (little-endian word order within the 64-bit value).
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  *hi = static_cast<uint32_t>(bits >> 32);
  *lo = static_cast<uint32_t>(bits & 0xffffffff);
}
1910
1911// Only works for little endian floating point formats.
1912// We don't support VFP on the mixed endian floating point platform.
// Returns true if d can be encoded as a VFP vmov immediate, and if so writes
// the 19-bit instruction encoding fragment (abcd/efgh fields) to *encoding.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two
  // 4-bit fields. For an 8-bit immediate of the form:
  //
  //  [abcdefgh]
  //
  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
  // created of the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //      00000000,00000000,00000000,00000000]
  //
  // where B = ~b.
  //

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 62:55 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 63 must be NOT bit 62.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
1960
1961
// Load a double immediate into Dd, either directly encoded in a vmov or
// synthesised through the integer pipeline via ip.
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Condition cond) {
  // Dd = immediate
  // Instruction details available in ARM DDI 0406B, A8-640.
  ASSERT(CpuFeatures::IsEnabled(VFP3));

  uint32_t enc;
  if (FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
  } else {
    // Synthesise the double from ARM immediates. This could be implemented
    // using vldr from a constant pool.
    // NOTE(review): the mov into ip below is emitted unconditionally (default
    // al), and in the lo == hi branch cond is not applied to the vmov either —
    // verify callers only pass al, or thread cond through.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (lo == hi) {
      // If the lo and hi parts of the double are equal, the literal is easier
      // to create. This is the case with 0.0.
      mov(ip, Operand(lo));
      vmov(dst, ip, ip);
    } else {
      // Move the low part of the double into the lower of the corresponding S
      // registers of D register dst.
      mov(ip, Operand(lo));
      vmov(dst.low(), ip, cond);

      // Move the high part of the double into the higher of the corresponding S
      // registers of D register dst.
      mov(ip, Operand(hi));
      vmov(dst.high(), ip, cond);
    }
  }
}
1997
1998
// Register-to-register single-precision move.
void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
2010
2011
// Register-to-register double-precision move.
void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xB*B20 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
2021
2022
// Move a core-register pair into a double register.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
2036
2037
// Move a double register into a core-register pair.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
2051
2052
// Move a core register into a single-precision register.
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2066
2067
// Move a single-precision register into a core register.
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2081
2082
// Type of data to read from or write to a VFP register: signed/unsigned
// 32-bit integer, or single/double precision float.
// Used as specifier in the generic vcvt instruction encoder below.
enum VFPType { S32, U32, F32, F64 };
2086
2087
2088static bool IsSignedVFPType(VFPType type) {
2089 switch (type) {
2090 case S32:
2091 return true;
2092 case U32:
2093 return false;
2094 default:
2095 UNREACHABLE();
2096 return false;
2097 }
Steve Blockd0582a62009-12-15 09:54:21 +00002098}
2099
2100
Steve Block6ded16b2010-05-10 14:33:55 +01002101static bool IsIntegerVFPType(VFPType type) {
2102 switch (type) {
2103 case S32:
2104 case U32:
2105 return true;
2106 case F32:
2107 case F64:
2108 return false;
2109 default:
2110 UNREACHABLE();
2111 return false;
2112 }
2113}
2114
2115
2116static bool IsDoubleVFPType(VFPType type) {
2117 switch (type) {
2118 case F32:
2119 return false;
2120 case F64:
2121 return true;
2122 default:
2123 UNREACHABLE();
2124 return false;
2125 }
2126}
2127
2128
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002129// Split five bit reg_code based on size of reg_type.
2130// 32-bit register codes are Vm:M
2131// 64-bit register codes are M:Vm
2132// where Vm is four bits, and M is a single bit.
static void SplitRegCode(VFPType reg_type,
                         int reg_code,
                         int* vm,
                         int* m) {
  ASSERT((reg_code >= 0) && (reg_code <= 31));
  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
    // 32 bit type: code is Vm:M, so M is the low bit.
    *m = reg_code & 0x1;
    *vm = reg_code >> 1;
  } else {
    // 64 bit type: code is M:Vm, so M is the high (fifth) bit.
    *m = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
}
2148
2149
2150// Encode vcvt.src_type.dst_type instruction.
// Encode a vcvt.dst_type.src_type instruction converting src_code into
// dst_code. Exactly one conversion is requested (src and dst types differ).
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    // Integer-to-integer is not a vcvt; exactly one side may be an integer.
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float -> integer: opc2 selects signedness, op carries rounding mode.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // Integer -> float: op selects source signedness.
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
2194
2195
2196void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2197 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002198 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002199 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002200 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002201 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002202}
2203
2204
2205void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2206 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002207 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002208 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002209 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002210 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002211}
2212
2213
2214void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2215 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002216 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002217 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002218 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002219 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002220}
2221
2222
2223void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2224 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002225 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002226 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002227 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002228 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002229}
2230
2231
2232void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2233 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002234 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002235 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002236 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002237 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002238}
2239
2240
2241void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2242 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002243 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002244 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002245 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002246 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002247}
2248
2249
2250void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2251 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002252 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002253 const Condition cond) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002254 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002255 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
Steve Blockd0582a62009-12-15 09:54:21 +00002256}
2257
2258
Steve Block44f0eee2011-05-26 01:26:41 +01002259void Assembler::vneg(const DwVfpRegister dst,
2260 const DwVfpRegister src,
2261 const Condition cond) {
2262 emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
2263 0x5*B9 | B8 | B6 | src.code());
2264}
2265
2266
Steve Block1e0659c2011-05-24 12:43:12 +01002267void Assembler::vabs(const DwVfpRegister dst,
2268 const DwVfpRegister src,
2269 const Condition cond) {
2270 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
2271 0x5*B9 | B8 | 0x3*B6 | src.code());
2272}
2273
2274
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2288
2289
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  // Subtraction differs from vadd only in bit 6 (B6).
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
2303
2304
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
2318
2319
Leon Clarkee46be812010-01-19 14:06:41 +00002320void Assembler::vdiv(const DwVfpRegister dst,
2321 const DwVfpRegister src1,
2322 const DwVfpRegister src2,
2323 const Condition cond) {
2324 // Dd = vdiv(Dn, Dm) double precision floating point division.
Steve Blockd0582a62009-12-15 09:54:21 +00002325 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2326 // Instruction details available in ARM DDI 0406A, A8-584.
2327 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
2328 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
Ben Murdoch8b112d22011-06-08 16:22:53 +01002329 ASSERT(CpuFeatures::IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002330 emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
2331 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2332}
2333
2334
Leon Clarkee46be812010-01-19 14:06:41 +00002335void Assembler::vcmp(const DwVfpRegister src1,
2336 const DwVfpRegister src2,
Steve Blockd0582a62009-12-15 09:54:21 +00002337 const Condition cond) {
2338 // vcmp(Dd, Dm) double precision floating point comparison.
2339 // Instruction details available in ARM DDI 0406A, A8-570.
2340 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002341 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
Ben Murdoch8b112d22011-06-08 16:22:53 +01002342 ASSERT(CpuFeatures::IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002343 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002344 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
Steve Blockd0582a62009-12-15 09:54:21 +00002345}
2346
2347
Iain Merrick75681382010-08-19 15:07:18 +01002348void Assembler::vcmp(const DwVfpRegister src1,
2349 const double src2,
Iain Merrick75681382010-08-19 15:07:18 +01002350 const Condition cond) {
2351 // vcmp(Dd, Dm) double precision floating point comparison.
2352 // Instruction details available in ARM DDI 0406A, A8-570.
2353 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002354 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
Ben Murdoch8b112d22011-06-08 16:22:53 +01002355 ASSERT(CpuFeatures::IsEnabled(VFP3));
Iain Merrick75681382010-08-19 15:07:18 +01002356 ASSERT(src2 == 0.0);
2357 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002358 src1.code()*B12 | 0x5*B9 | B8 | B6);
Iain Merrick75681382010-08-19 15:07:18 +01002359}
2360
2361
Russell Brenner90bac252010-11-18 13:33:46 -08002362void Assembler::vmsr(Register dst, Condition cond) {
2363 // Instruction details available in ARM DDI 0406A, A8-652.
2364 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
2365 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
Ben Murdoch8b112d22011-06-08 16:22:53 +01002366 ASSERT(CpuFeatures::IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002367 emit(cond | 0xE*B24 | 0xE*B20 | B16 |
2368 dst.code()*B12 | 0xA*B8 | B4);
2369}
2370
2371
Steve Blockd0582a62009-12-15 09:54:21 +00002372void Assembler::vmrs(Register dst, Condition cond) {
2373 // Instruction details available in ARM DDI 0406A, A8-652.
2374 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
2375 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
Ben Murdoch8b112d22011-06-08 16:22:53 +01002376 ASSERT(CpuFeatures::IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002377 emit(cond | 0xE*B24 | 0xF*B20 | B16 |
2378 dst.code()*B12 | 0xA*B8 | B4);
2379}
2380
2381
Steve Block8defd9f2010-07-08 12:39:36 +01002382void Assembler::vsqrt(const DwVfpRegister dst,
2383 const DwVfpRegister src,
2384 const Condition cond) {
2385 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
2386 // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
Ben Murdoch8b112d22011-06-08 16:22:53 +01002387 ASSERT(CpuFeatures::IsEnabled(VFP3));
Steve Block8defd9f2010-07-08 12:39:36 +01002388 emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
2389 dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
2390}
2391
2392
Andrei Popescu31002712010-02-23 13:46:05 +00002393// Pseudo instructions.
Steve Block6ded16b2010-05-10 14:33:55 +01002394void Assembler::nop(int type) {
2395 // This is mov rx, rx.
2396 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2397 emit(al | 13*B21 | type*B12 | type);
2398}
2399
2400
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002401bool Assembler::IsNop(Instr instr, int type) {
Steve Block1e0659c2011-05-24 12:43:12 +01002402 // Check for mov rx, rx where x = type.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002403 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2404 return instr == (al | 13*B21 | type*B12 | type);
2405}
2406
2407
Steve Blockd0582a62009-12-15 09:54:21 +00002408bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2409 uint32_t dummy1;
2410 uint32_t dummy2;
2411 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2412}
2413
2414
Andrei Popescu31002712010-02-23 13:46:05 +00002415// Debugging.
// Records the current pc as a JS return site so the debugger can find and
// patch the return sequence later.
void Assembler::RecordJSReturn() {
  // Flush buffered source positions first so they are attributed to the code
  // emitted before the return site.
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
2421
2422
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002423void Assembler::RecordDebugBreakSlot() {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002424 positions_recorder()->WriteRecordedPositions();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002425 CheckBuffer();
2426 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2427}
2428
2429
Steve Blocka7e24c12009-10-30 11:49:00 +00002430void Assembler::RecordComment(const char* msg) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01002431 if (FLAG_code_comments) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002432 CheckBuffer();
2433 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2434 }
2435}
2436
2437
// Replaces the code buffer with a larger one. Instructions grow upward from
// the start of the buffer and relocation info grows downward from the end;
// both regions are copied and all buffer-relative state is rebased. Only
// valid when the assembler owns its buffer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: jump straight to 4KB, then double up to 1MB,
  // then grow linearly by 1MB to bound overshoot for large code objects.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. The instruction region keeps its offset from the buffer
  // start; the reloc region keeps its offset from the buffer end, so it
  // shifts by the difference in end addresses (rc_delta).
  // NOTE(review): deltas are held in ints, which assumes buffers < 2GB.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries: their pcs point into the code region
  // and must be rebased by pc_delta (JS_RETURN entries are position-based and
  // are left untouched).
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
2487
2488
Ben Murdochb0fe1622011-05-05 13:52:32 +01002489void Assembler::db(uint8_t data) {
Ben Murdochb8e0da22011-05-16 14:20:40 +01002490 // No relocation info should be pending while using db. db is used
2491 // to write pure data with no pointers and the constant pool should
2492 // be emitted before using db.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002493 ASSERT(num_pending_reloc_info_ == 0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002494 CheckBuffer();
2495 *reinterpret_cast<uint8_t*>(pc_) = data;
2496 pc_ += sizeof(uint8_t);
2497}
2498
2499
2500void Assembler::dd(uint32_t data) {
Ben Murdochb8e0da22011-05-16 14:20:40 +01002501 // No relocation info should be pending while using dd. dd is used
2502 // to write pure data with no pointers and the constant pool should
2503 // be emitted before using dd.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002504 ASSERT(num_pending_reloc_info_ == 0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002505 CheckBuffer();
2506 *reinterpret_cast<uint32_t*>(pc_) = data;
2507 pc_ += sizeof(uint32_t);
2508}
2509
2510
// Records relocation information for the instruction about to be emitted at
// the current pc. Modes that carry a pool constant are queued as pending
// entries for the next constant pool; all modes (except skipped external
// references) are also streamed into the reloc info writer.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    // Queue the entry for the constant pool and remember where the first
    // pool-referencing instruction is, so pool emission can be scheduled
    // within the ldr pc-relative reach.
    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
    if (num_pending_reloc_info_ == 0) {
      first_const_pool_use_ = pc_offset();
    }
    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      // Code targets with an AST id substitute the recorded AST id for the
      // plain data payload, then clear it so it is not reused.
      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
2553
2554
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002555void Assembler::BlockConstPoolFor(int instructions) {
2556 int pc_limit = pc_offset() + instructions * kInstrSize;
2557 if (no_const_pool_before_ < pc_limit) {
2558 // If there are some pending entries, the constant pool cannot be blocked
2559 // further than first_const_pool_use_ + kMaxDistToPool
2560 ASSERT((num_pending_reloc_info_ == 0) ||
2561 (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
2562 no_const_pool_before_ = pc_limit;
Steve Blocka7e24c12009-10-30 11:49:00 +00002563 }
2564
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002565 if (next_buffer_check_ < no_const_pool_before_) {
2566 next_buffer_check_ = no_const_pool_before_;
2567 }
2568}
Steve Blocka7e24c12009-10-30 11:49:00 +00002569
Steve Blocka7e24c12009-10-30 11:49:00 +00002570
// Decides whether to emit the pending constant pool entries at the current
// pc, and if so emits the pool: an optional jump over it, a marker word, and
// the constants, patching each pending 'ldr rd, [pc, #0]' to address its
// constant. 'force_emit' demands emission; 'require_jump' means execution can
// fall into the pool site, so a branch around the pool is needed.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (num_pending_reloc_info_ == 0) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance to the first instruction accessing the constant pool is
  //    kAvgDistToPool or more.
  //  * no jump is required and the distance to the first instruction accessing
  //    the constant pool is at least kMaxDistToPool / 2.
  ASSERT(first_const_pool_use_ >= 0);
  int dist = pc_offset() - first_const_pool_use_;
  if (!force_emit && dist < kAvgDistToPool &&
      (require_jump || (dist < (kMaxDistToPool / 2)))) {
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool and the constant pool marker and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int needed_space = jump_instr + kInstrSize +
                     num_pending_reloc_info_ * kInstrSize + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool (the emits below would
    // otherwise re-trigger it).
    BlockConstPoolScope block_const_pool(this);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    RecordComment("[ Constant Pool");

    // Put down constant pool marker "Undefined instruction" as specified by
    // A5.6 (ARMv7) Instruction set encoding. The low bits carry the entry
    // count.
    emit(kConstantPoolMarker | num_pending_reloc_info_);

    // Emit constant pool entries, back-patching each referencing load.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION);

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      ASSERT(IsLdrPcImmediateOffset(instr) &&
             GetLdrRegisterImmediateOffset(instr) == 0);

      // Rewrite the load's offset to reach the constant emitted below
      // (kPcLoadDelta accounts for the ARM pc-read-ahead).
      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      // 0 is the smallest delta:
      //   ldr rd, [pc, #0]
      //   constant pool marker
      //   data
      ASSERT(is_uint12(delta));

      instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
      emit(rinfo.data());
    }

    // The pool is drained; reset the pending state.
    num_pending_reloc_info_ = 0;
    first_const_pool_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
2662
2663
2664} } // namespace v8::internal
Leon Clarkef7060e22010-06-03 12:02:55 +01002665
2666#endif // V8_TARGET_ARCH_ARM