blob: be34df9c14411bcd2ac6aa7b4530988ff5218be8 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions
6// are met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the
14// distribution.
15//
16// - Neither the name of Sun Microsystems or the names of contributors may
17// be used to endorse or promote products derived from this software without
18// specific prior written permission.
19//
20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31// OF THE POSSIBILITY OF SUCH DAMAGE.
32
Leon Clarked91b9f72010-01-27 17:25:45 +000033// The original source code covered by the above license above has been
34// modified significantly by Google Inc.
35// Copyright 2010 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +000036
37#include "v8.h"
38
Leon Clarkef7060e22010-06-03 12:02:55 +010039#if defined(V8_TARGET_ARCH_ARM)
40
Steve Blocka7e24c12009-10-30 11:49:00 +000041#include "arm/assembler-arm-inl.h"
42#include "serialize.h"
43
44namespace v8 {
45namespace internal {
46
// Construct with all feature bitmasks cleared; actual feature detection is
// deferred to Probe().
CpuFeatures::CpuFeatures()
    : supported_(0),
      enabled_(0),
      found_by_runtime_probing_(0) {
}
Andrei Popescu402d9372010-02-26 13:31:12 +000052
#ifdef __arm__
// Returns the set of CPU feature bits that the build configuration says the
// C compiler itself may emit.  If the compiler may use a feature anywhere,
// our generated code (including snapshot code built on this host) may use it
// too.  Only compiled on real ARM hosts.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
  answer |= 1u << VFP3;
#endif  // def CAN_USE_VFP_INSTRUCTIONS
  return answer;
}
#endif  // def __arm__
71
72
// Detect which CPU features are available and record them in supported_ /
// found_by_runtime_probing_.
//  - Simulator builds (!__arm__): features come purely from flags.
//  - Real ARM, portable && serializing: only compile-time/platform-implied
//    features, so a snapshot never depends on this host's CPU.
//  - Real ARM otherwise: runtime-probe the CPU via OS::ArmCpuHasFeature.
void CpuFeatures::Probe(bool portable) {
#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
    supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  if (portable && Serializer::enabled()) {
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    supported_ |= CpuFeaturesImpliedByCompiler();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }

  // Non-portable code may rely on probed features unconditionally, so do not
  // track them as "runtime probed".
  if (!portable) found_by_runtime_probing_ = 0;
#endif
}
105
106
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// No pc-relative relocations need adjusting when code moves on ARM.
const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction.  We don't
  // generate those yet.
  return false;
}
119
120
121
Steve Blocka7e24c12009-10-30 11:49:00 +0000122void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
123 // Patch the code at the current address with the supplied instructions.
124 Instr* pc = reinterpret_cast<Instr*>(pc_);
125 Instr* instr = reinterpret_cast<Instr*>(instructions);
126 for (int i = 0; i < instruction_count; i++) {
127 *(pc + i) = *(instr + i);
128 }
129
130 // Indicate that code has changed.
131 CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
132}
133
134
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not needed on ARM so far; aborts if ever reached.
  UNIMPLEMENTED();
}
141
142
143// -----------------------------------------------------------------------------
144// Implementation of Operand and MemOperand
145// See assembler-arm-inl.h for inlined constructors
146
// Immediate operand holding a heap object handle.  Heap objects are embedded
// via the handle location (so the GC can relocate them through EMBEDDED_OBJECT
// reloc info); smis are embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!HEAP->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
161
162
163Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
164 ASSERT(is_uint5(shift_imm));
165 ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
166 rm_ = rm;
167 rs_ = no_reg;
168 shift_op_ = shift_op;
169 shift_imm_ = shift_imm & 31;
170 if (shift_op == RRX) {
171 // encoded as ROR with shift_imm == 0
172 ASSERT(shift_imm == 0);
173 shift_op_ = ROR;
174 shift_imm_ = 0;
175 }
176}
177
178
179Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
180 ASSERT(shift_op != RRX);
181 rm_ = rm;
182 rs_ = no_reg;
183 shift_op_ = shift_op;
184 rs_ = rs;
185}
186
187
188MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
189 rn_ = rn;
190 rm_ = no_reg;
191 offset_ = offset;
192 am_ = am;
193}
194
195MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
196 rn_ = rn;
197 rm_ = rm;
198 shift_op_ = LSL;
199 shift_imm_ = 0;
200 am_ = am;
201}
202
203
// [rn +/- rm, <shift_op> #<shift_imm>] — shifted register offset.
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));  // shift amount must fit the 5-bit field
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
213
214
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These bit patterns are used by the peephole optimizer and the patching code
// to recognize and rewrite previously emitted instructions.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
// Masks/patterns for recognizing mov/mvn, cmp/cmn and flipping between the
// two members of each complementary pair (see fits_shifter below).
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | fp.code() * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
265
Steve Blocka7e24c12009-10-30 11:49:00 +0000266
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;


// Construct an assembler over either a caller-supplied buffer (not owned) or
// an internally managed one.  For minimally-sized internal buffers the
// isolate keeps one spare buffer that is recycled to avoid repeated
// allocation (see ~Assembler, which returns it).
Assembler::Assembler(void* buffer, int buffer_size)
    : AssemblerBase(Isolate::Current()),
      positions_recorder_(this),
      allow_peephole_optimization_(false),
      emit_debug_code_(FLAG_debug_code) {
  allow_peephole_optimization_ = FLAG_peephole_optimization;
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the isolate's cached spare buffer if one is available.
      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers.  Code grows up from buffer_; relocation info is
  // written downwards from the end of the buffer.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
}
314
315
// Release the code buffer.  A minimally-sized owned buffer is handed back to
// the isolate as the spare buffer (if that slot is empty) instead of being
// freed, pairing with the reuse logic in the constructor.
Assembler::~Assembler() {
  ASSERT(const_pool_blocked_nesting_ == 0);  // all BlockConstPool scopes closed
  if (own_buffer_) {
    if (isolate()->assembler_spare_buffer() == NULL &&
        buffer_size_ == kMinimalBufferSize) {
      isolate()->set_assembler_spare_buffer(buffer_);
    } else {
      DeleteArray(buffer_);
    }
  }
}
327
328
// Finalize code generation and fill in *desc with the buffer layout:
// instructions at the start, relocation info at the end of the buffer.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);  // all pending reloc info must have been emitted

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
340
341
342void Assembler::Align(int m) {
343 ASSERT(m >= 4 && IsPowerOf2(m));
344 while ((pc_offset() & (m - 1)) != 0) {
345 nop();
346 }
347}
348
349
void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}
354
355
// Extract the condition field (top 4 bits) from an encoded instruction.
Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}
359
360
Steve Block6ded16b2010-05-10 14:33:55 +0100361bool Assembler::IsBranch(Instr instr) {
362 return (instr & (B27 | B25)) == (B27 | B25);
363}
364
365
int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes.
  // (<< 8 moves bit 23 to the sign bit; the arithmetic >> 6 then both
  // sign-extends and leaves a net << 2, i.e. the *4.)
  return ((instr & kImm24Mask) << 8) >> 6;
}
372
373
// True for ldr <Rd>, [<Rn>, #+/-imm12] (register load, immediate offset).
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


// Returns the signed byte offset encoded in an ldr-immediate instruction.
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;  // U bit: add vs. subtract offset
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


// Returns instr rewritten to carry the given signed offset (|offset| < 4096).
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}
397
398
// True for str <Rd>, [<Rn>, #+/-imm12] (register store, immediate offset).
bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


// Returns instr rewritten to carry the given signed offset (|offset| < 4096).
Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


// True for add <Rd>, <Rn>, #imm (add with rotated-immediate operand).
bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


// Returns instr rewritten to carry the given non-negative immediate.
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}
428
429
// Extract the destination register (Rd field) from an instruction.
Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


// Extract the first operand register (Rn field) from an instruction.
Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


// Extract the second operand register (Rm field) from an instruction.
Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}
449
450
// The predicates below compare an instruction, with its Rd field masked out,
// against the canonical patterns defined above.

// True for str <Rd>, [sp, #-4]! (push of a single register).
bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


// True for ldr <Rd>, [sp], #4 (pop into a single register).
bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


// True for str <Rd>, [fp, #+offset].
bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


// True for ldr <Rd>, [fp, #+offset].
bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


// True for str <Rd>, [fp, #-offset].
bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


// True for ldr <Rd>, [fp, #-offset].
bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}
479
480
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  // 0x051f0000 is kLdrPCPattern with pc (r15) in the Rn field and the
  // condition bits stripped.
  return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
}
486
487
// True for tst <Rn>, #imm (immediate operand; S is always set for tst).
bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


// True for cmp <Rn>, <Rm> (register operand, no shift-by-register).
bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


// True for cmp <Rn>, #imm (immediate operand).
bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


// Returns the register being compared in a cmp-immediate instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return GetRn(instr);
}


// Returns the raw (un-rotated) 12-bit immediate field of a cmp-immediate.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}
516
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;


// Decode the position a link-chain entry at pos points to: either the target
// stored in an emitted label constant, or the branch target of a
// b/bl/blx-immediate instruction.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and scale by 4 (<< 8 then >> 6).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  // Branch offsets are relative to pc + 8 (kPcLoadDelta).
  return pos + kPcLoadDelta + imm26;
}
547
548
// Patch the link-chain entry at pos so that it refers to target_pos: either
// rewrite an emitted label constant or re-encode a branch's 24-bit offset.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);  // blx targets must be halfword aligned
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);  // b/bl targets must be word aligned
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
572
573
// Debug helper: print a label's state, and for a linked label walk its link
// chain printing each use site with a disassembly of the branch mnemonic.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // local copy so walking the chain does not disturb L
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        // Emitted label constant (see target_at), not a branch.
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
630
631
// Bind label L to code position pos: patch every use on its link chain to
// the final target, then mark the label bound.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
646
647
// Merge appendix's link chain into L's and clear appendix.  If L is empty the
// appendix chain simply becomes L's chain.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list: walk to the end of L's chain (terminated
      // by kEndOfChain) and point the last entry at appendix's head.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
667
668
// Bind label L to the current emission position.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
673
674
// Advance L to the next entry of its link chain, or mark it unused when the
// chain terminator (kEndOfChain) is reached.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}
685
686
Kristian Monsen9dcf7e22010-06-28 14:14:28 +0100687static Instr EncodeMovwImmediate(uint32_t immediate) {
688 ASSERT(immediate < 0x10000);
689 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
690}
691
692
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
//
// Tries to express imm32 as an ARM "rotated 8-bit" immediate.  On failure,
// and when instr is non-NULL, attempts to flip the opcode to its complement
// (mov<->mvn, cmp<->cmn, add<->sub, and<->bic) or to rewrite a flag-leaving
// mov into movw, so that the (adjusted) immediate fits after all.
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  // NOTE(review): for rot == 0 the expression shifts right by 32, which is
  // technically undefined behavior in C++; presumably benign on the
  // toolchains used here — confirm.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            // Rewrite the mov into a movw carrying the 16-bit immediate.
            *instr ^= kMovwLeaveCCFlip;
            *instr |= EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
751
752
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
//
// Returns true when this immediate must be materialized via the constant
// pool: any relocatable value, except external references when not
// serializing, and plain (RelocInfo::NONE) immediates.
bool Operand::must_use_constant_pool() const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    // Once we get here the serializer mode can no longer change.
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    return Serializer::enabled();
  } else if (rmode_ == RelocInfo::NONE) {
    return false;
  }
  return true;
}
770
771
// Returns true if using this operand with the given instruction will emit
// exactly one instruction, false if an extra load/mov(w)/movt is needed.
// Register operands are always single-instruction.
bool Operand::is_single_instruction(Instr instr) const {
  if (rm_.is_valid()) return true;
  uint32_t dummy1, dummy2;
  if (must_use_constant_pool() ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required. For a mov instruction not setting the
    // condition code additional instruction conventions can be used.
    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
      if (must_use_constant_pool() ||
          !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
        // mov instruction will be an ldr from constant pool (one instruction).
        return true;
      } else {
        // mov instruction will be a mov or movw followed by movt (two
        // instructions).
        return false;
      }
    } else {
      // If this is not a mov or mvn instruction there will always an additional
      // instructions - either mov or ldr. The mov might actually be two
      // instructions mov or movw followed by movt so including the actual
      // instruction two or three instructions will be generated.
      return false;
    }
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return true;
  }
}
803
804
// Encode and emit an addressing-mode-1 (data-processing) instruction.
// |instr| carries the condition, opcode and S bit; |rn| is the first operand
// register, |rd| the destination, and |x| the flexible second operand: an
// immediate, an immediate-shifted register, or a register-shifted register.
// Immediates that cannot be encoded as a shifter operand are materialized
// first (via movw/movt, mov/mvn, or a constant-pool load into ip).
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_use_constant_pool() ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        if (x.must_use_constant_pool() ||
            !isolate()->cpu_features()->IsSupported(ARMv7)) {
          // Pre-ARMv7 (or forced constant pool): load the value with a
          // pc-relative ldr whose slot is filled from the constant pool.
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(rd, MemOperand(pc, 0), cond);
        } else {
          // Will probably use movw, will certainly not use constant pool.
          mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
          movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
        }
      } else {
        // If this is not a mov or mvn instruction we may still be able to
        // avoid a constant pool entry by using mvn or movw.
        if (!x.must_use_constant_pool() &&
            (instr & kMovMvnMask) != kMovMvnPattern) {
          mov(ip, x, LeaveCC, cond);
        } else {
          RecordRelocInfo(x.rmode_, x.imm32_);
          ldr(ip, MemOperand(pc, 0), cond);
        }
        // Re-issue the original instruction with ip as the second operand.
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
}
862
863
// Encode and emit an addressing-mode-2 (word/byte load-store) instruction.
// Immediate offsets must fit in 12 bits; larger offsets are first moved into
// ip and the instruction is re-issued with a register offset.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets are encoded as a positive magnitude with the
      // U (add/subtract) bit flipped.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
894
895
// Encode and emit an addressing-mode-3 (halfword / signed-byte / doubleword
// load-store) instruction. Immediate offsets are limited to 8 bits, split
// into two nibbles in the encoding; scaled register offsets are not supported
// by this mode and are materialized into ip first.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Encode negative offsets as positive magnitude + flipped U bit.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // Split the 8-bit offset into high and low nibbles (immH:immL).
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
933
934
// Encode and emit an addressing-mode-4 (block load/store, ldm/stm)
// instruction: base register |rn| plus a 16-bit register list |rl|.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);       // empty register lists are not encodable
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
941
942
// Encode and emit an addressing-mode-5 (coprocessor load/store) instruction.
// The offset is a word-aligned byte offset, stored as an unsigned 8-bit
// word count. Unindexed addressing is not encoded by this function.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // byte offset -> word offset
  if (offset_8 < 0) {
    // Encode negative offsets as positive magnitude + flipped U bit.
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
966
967
// Compute the pc-relative byte offset to label L for a branch emitted at the
// current position. For an unbound label the previous link (or kEndOfChain)
// is used as the target and this site is threaded onto L's link chain.
// Note: |jump_elimination_allowed| is currently unused on ARM.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    // Thread this branch site onto the label's chain of fixup locations.
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  // kPcLoadDelta accounts for the ARM pipeline's pc-read-ahead of 8 bytes.
  return target_pos - (pc_offset() + kPcLoadDelta);
}
986
987
// Record a use of label L in the instruction slot at |at_offset|: the slot is
// overwritten with the previous link position (biased by the code-object
// header offset) and this site becomes the new head of L's link chain.
// NOTE(review): when L is already bound, target_pos is computed but nothing
// is written — presumably callers only invoke this for unbound labels;
// confirm against call sites.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}
1002
1003
Andrei Popescu31002712010-02-23 13:46:05 +00001004// Branch instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001005void Assembler::b(int branch_offset, Condition cond) {
1006 ASSERT((branch_offset & 3) == 0);
1007 int imm24 = branch_offset >> 2;
1008 ASSERT(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001009 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001010
Steve Block6ded16b2010-05-10 14:33:55 +01001011 if (cond == al) {
Andrei Popescu31002712010-02-23 13:46:05 +00001012 // Dead code is a good location to emit the constant pool.
Steve Blocka7e24c12009-10-30 11:49:00 +00001013 CheckConstPool(false, false);
Steve Block6ded16b2010-05-10 14:33:55 +01001014 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001015}
1016
1017
// Emit a branch-with-link (BL) to the given word-aligned pc-relative offset.
// Flushes recorded source positions since this is a call site.
void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
1025
1026
// Emit an immediate BLX (branch with link and exchange to Thumb); ARMv5+.
// The offset is halfword-aligned; bit 1 of the offset becomes the H bit.
void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
1035
1036
// Emit a register BLX (branch with link and exchange); ARMv5+.
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
1042
1043
// Emit a BX (branch and exchange to the address in |target|); v5+, plus v4t.
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
1049
1050
Andrei Popescu31002712010-02-23 13:46:05 +00001051// Data-processing instructions.
1052
Steve Blocka7e24c12009-10-30 11:49:00 +00001053void Assembler::and_(Register dst, Register src1, const Operand& src2,
1054 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001055 addrmod1(cond | AND | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001056}
1057
1058
// EOR: dst = src1 ^ src2 (flexible second operand).
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}
1063
1064
// SUB: dst = src1 - src2 (flexible second operand).
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}
1069
1070
// RSB (reverse subtract): dst = src2 - src1.
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}
1075
1076
// ADD: dst = src1 + src2 (flexible second operand), followed by a peephole
// pass that deletes an immediately preceding push/pop pair which this add
// (of sp) would have completed.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
    // Rewind over both instructions; no replacement is needed.
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_peephole_optimization) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
1095
1096
// ADC (add with carry): dst = src1 + src2 + C.
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}
1101
1102
// SBC (subtract with carry): dst = src1 - src2 - !C.
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}
1107
1108
// RSC (reverse subtract with carry): dst = src2 - src1 - !C.
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}
1113
1114
// TST: set flags on src1 & src2; no destination register (S always set).
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}
1118
1119
// TEQ: set flags on src1 ^ src2; no destination register (S always set).
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}
1123
1124
// CMP: set flags on src1 - src2; no destination register (S always set).
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}
1128
1129
// CMP with a raw 12-bit immediate field, emitted directly without shifter
// encoding — the caller supplies the exact rotate/immediate bits.
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
1135
1136
// CMN: set flags on src1 + src2; no destination register (S always set).
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}
1140
1141
// ORR: dst = src1 | src2 (flexible second operand).
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}
1146
1147
// MOV: dst = src. A mov to pc is a branch, so recorded source positions are
// flushed first in that case.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
1158
1159
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001160void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
1161 ASSERT(immediate < 0x10000);
1162 mov(reg, Operand(immediate), LeaveCC, cond);
1163}
1164
1165
// MOVT: load a 16-bit immediate into the top half of |reg|, leaving the
// bottom half untouched (ARMv7).
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1169
1170
Steve Blocka7e24c12009-10-30 11:49:00 +00001171void Assembler::bic(Register dst, Register src1, const Operand& src2,
1172 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001173 addrmod1(cond | BIC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001174}
1175
1176
// MVN (move not): dst = ~src.
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}
1180
1181
Andrei Popescu31002712010-02-23 13:46:05 +00001182// Multiply instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001183void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1184 SBit s, Condition cond) {
1185 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1186 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1187 src2.code()*B8 | B7 | B4 | src1.code());
1188}
1189
1190
// MUL: dst = src1 * src2 (low 32 bits).
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1197
1198
// SMLAL (signed multiply-accumulate long):
// dstH:dstL = dstH:dstL + (signed) src1 * src2.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // RdLo and RdHi must differ
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1210
1211
// SMULL (signed multiply long): dstH:dstL = (signed) src1 * src2.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // RdLo and RdHi must differ
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1223
1224
// UMLAL (unsigned multiply-accumulate long):
// dstH:dstL = dstH:dstL + (unsigned) src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // RdLo and RdHi must differ
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1236
1237
// UMULL (unsigned multiply long): dstH:dstL = (unsigned) src1 * src2.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // RdLo and RdHi must differ
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1249
1250
Andrei Popescu31002712010-02-23 13:46:05 +00001251// Miscellaneous arithmetic instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001252void Assembler::clz(Register dst, Register src, Condition cond) {
1253 // v5 and above.
1254 ASSERT(!dst.is(pc) && !src.is(pc));
1255 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
Steve Block1e0659c2011-05-24 12:43:12 +01001256 15*B8 | CLZ | src.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001257}
1258
1259
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001260// Saturating instructions.
1261
1262// Unsigned saturate.
// USAT (unsigned saturate): saturate the (optionally shifted) value of
// src to the range [0, 2^satpos - 1] and write it to dst.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  // NOTE(review): the comment says v6 but the assert gates on ARMv7 —
  // presumably ARMv7 is the only v6+ variant the embedder tracks; confirm.
  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));  // register shifts are not encodable here

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;  // sh bit selects ASR over LSL
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
1282
1283
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001284// Bitfield manipulation instructions.
1285
1286// Unsigned bit field extract.
1287// Extracts #width adjacent bits from position #lsb in a register, and
1288// writes them to the low bits of a destination register.
1289// ubfx dst, src, #lsb, #width
// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register (zero-extended).
// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // Encoding stores width - 1 in the widthminus1 field.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1303
1304
1305// Signed bit field extract.
1306// Extracts #width adjacent bits from position #lsb in a register, and
1307// writes them to the low bits of a destination register. The extracted
1308// value is sign extended to fill the destination register.
1309// sbfx dst, src, #lsb, #width
// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // Encoding stores width - 1 in the widthminus1 field.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1323
1324
1325// Bit field clear.
1326// Sets #width adjacent bits at position #lsb in the destination register
1327// to zero, preserving the value of the other bits.
1328// bfc dst, #lsb, #width
// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The encoding stores the inclusive msb of the field; Rm == 0xf means BFC.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
1338
1339
1340// Bit field insert.
1341// Inserts #width adjacent bits from the low bits of the source register
1342// into position #lsb of the destination register.
1343// bfi dst, src, #lsb, #width
1344void Assembler::bfi(Register dst,
1345 Register src,
1346 int lsb,
1347 int width,
1348 Condition cond) {
1349 // v7 and above.
Steve Block44f0eee2011-05-26 01:26:41 +01001350 ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001351 ASSERT(!dst.is(pc) && !src.is(pc));
1352 ASSERT((lsb >= 0) && (lsb <= 31));
1353 ASSERT((width >= 1) && (width <= (32 - lsb)));
1354 int msb = lsb + width - 1;
1355 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1356 src.code());
1357}
1358
1359
Andrei Popescu31002712010-02-23 13:46:05 +00001360// Status register access instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001361void Assembler::mrs(Register dst, SRegister s, Condition cond) {
1362 ASSERT(!dst.is(pc));
1363 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1364}
1365
1366
// MSR: write |src| to the status-register fields selected by |fields|.
// An immediate source that cannot be encoded as a shifter operand is first
// loaded into ip via the constant pool, then the register form is used.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_use_constant_pool() ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1390
1391
Andrei Popescu31002712010-02-23 13:46:05 +00001392// Load/Store instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001393void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1394 if (dst.is(pc)) {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001395 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00001396 }
1397 addrmod2(cond | B26 | L, dst, src);
1398
Leon Clarkef7060e22010-06-03 12:02:55 +01001399 // Eliminate pattern: push(ry), pop(rx)
1400 // str(ry, MemOperand(sp, 4, NegPreIndex), al)
1401 // ldr(rx, MemOperand(sp, 4, PostIndex), al)
1402 // Both instructions can be eliminated if ry = rx.
1403 // If ry != rx, a register copy from ry to rx is inserted
1404 // after eliminating the push and the pop instructions.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001405 if (can_peephole_optimize(2)) {
1406 Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
1407 Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
Leon Clarkef7060e22010-06-03 12:02:55 +01001408
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001409 if (IsPush(push_instr) && IsPop(pop_instr)) {
Steve Block1e0659c2011-05-24 12:43:12 +01001410 if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001411 // For consecutive push and pop on different registers,
1412 // we delete both the push & pop and insert a register move.
1413 // push ry, pop rx --> mov rx, ry
1414 Register reg_pushed, reg_popped;
1415 reg_pushed = GetRd(push_instr);
1416 reg_popped = GetRd(pop_instr);
1417 pc_ -= 2 * kInstrSize;
1418 // Insert a mov instruction, which is better than a pair of push & pop
1419 mov(reg_popped, reg_pushed);
1420 if (FLAG_print_peephole_optimization) {
1421 PrintF("%x push/pop (diff reg) replaced by a reg move\n",
1422 pc_offset());
1423 }
1424 } else {
1425 // For consecutive push and pop on the same register,
1426 // both the push and the pop can be deleted.
1427 pc_ -= 2 * kInstrSize;
1428 if (FLAG_print_peephole_optimization) {
1429 PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
1430 }
Leon Clarkef7060e22010-06-03 12:02:55 +01001431 }
1432 }
1433 }
1434
1435 if (can_peephole_optimize(2)) {
1436 Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
1437 Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
1438
1439 if ((IsStrRegFpOffset(str_instr) &&
1440 IsLdrRegFpOffset(ldr_instr)) ||
1441 (IsStrRegFpNegOffset(str_instr) &&
1442 IsLdrRegFpNegOffset(ldr_instr))) {
1443 if ((ldr_instr & kLdrStrInstrArgumentMask) ==
1444 (str_instr & kLdrStrInstrArgumentMask)) {
1445 // Pattern: Ldr/str same fp+offset, same register.
1446 //
1447 // The following:
1448 // str rx, [fp, #-12]
1449 // ldr rx, [fp, #-12]
1450 //
1451 // Becomes:
1452 // str rx, [fp, #-12]
1453
1454 pc_ -= 1 * kInstrSize;
1455 if (FLAG_print_peephole_optimization) {
1456 PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
1457 }
1458 } else if ((ldr_instr & kLdrStrOffsetMask) ==
1459 (str_instr & kLdrStrOffsetMask)) {
1460 // Pattern: Ldr/str same fp+offset, different register.
1461 //
1462 // The following:
1463 // str rx, [fp, #-12]
1464 // ldr ry, [fp, #-12]
1465 //
1466 // Becomes:
1467 // str rx, [fp, #-12]
1468 // mov ry, rx
1469
1470 Register reg_stored, reg_loaded;
1471 reg_stored = GetRd(str_instr);
1472 reg_loaded = GetRd(ldr_instr);
1473 pc_ -= 1 * kInstrSize;
1474 // Insert a mov instruction, which is better than ldr.
1475 mov(reg_loaded, reg_stored);
1476 if (FLAG_print_peephole_optimization) {
1477 PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
1478 }
1479 }
1480 }
1481 }
1482
1483 if (can_peephole_optimize(3)) {
1484 Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
1485 Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
1486 Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
1487 if (IsPush(mem_write_instr) &&
1488 IsPop(mem_read_instr)) {
1489 if ((IsLdrRegFpOffset(ldr_instr) ||
1490 IsLdrRegFpNegOffset(ldr_instr))) {
Steve Block1e0659c2011-05-24 12:43:12 +01001491 if (Instruction::RdValue(mem_write_instr) ==
1492 Instruction::RdValue(mem_read_instr)) {
Leon Clarkef7060e22010-06-03 12:02:55 +01001493 // Pattern: push & pop from/to same register,
1494 // with a fp+offset ldr in between
1495 //
1496 // The following:
1497 // str rx, [sp, #-4]!
1498 // ldr rz, [fp, #-24]
1499 // ldr rx, [sp], #+4
1500 //
1501 // Becomes:
1502 // if(rx == rz)
1503 // delete all
1504 // else
1505 // ldr rz, [fp, #-24]
1506
Steve Block1e0659c2011-05-24 12:43:12 +01001507 if (Instruction::RdValue(mem_write_instr) ==
1508 Instruction::RdValue(ldr_instr)) {
Leon Clarkef7060e22010-06-03 12:02:55 +01001509 pc_ -= 3 * kInstrSize;
1510 } else {
1511 pc_ -= 3 * kInstrSize;
1512 // Reinsert back the ldr rz.
1513 emit(ldr_instr);
1514 }
1515 if (FLAG_print_peephole_optimization) {
1516 PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
1517 }
1518 } else {
1519 // Pattern: push & pop from/to different registers
1520 // with a fp+offset ldr in between
1521 //
1522 // The following:
1523 // str rx, [sp, #-4]!
1524 // ldr rz, [fp, #-24]
1525 // ldr ry, [sp], #+4
1526 //
1527 // Becomes:
1528 // if(ry == rz)
1529 // mov ry, rx;
1530 // else if(rx != rz)
1531 // ldr rz, [fp, #-24]
1532 // mov ry, rx
1533 // else if((ry != rz) || (rx == rz)) becomes:
1534 // mov ry, rx
1535 // ldr rz, [fp, #-24]
1536
1537 Register reg_pushed, reg_popped;
Steve Block1e0659c2011-05-24 12:43:12 +01001538 if (Instruction::RdValue(mem_read_instr) ==
1539 Instruction::RdValue(ldr_instr)) {
Leon Clarkef7060e22010-06-03 12:02:55 +01001540 reg_pushed = GetRd(mem_write_instr);
1541 reg_popped = GetRd(mem_read_instr);
1542 pc_ -= 3 * kInstrSize;
1543 mov(reg_popped, reg_pushed);
Steve Block1e0659c2011-05-24 12:43:12 +01001544 } else if (Instruction::RdValue(mem_write_instr) !=
1545 Instruction::RdValue(ldr_instr)) {
Leon Clarkef7060e22010-06-03 12:02:55 +01001546 reg_pushed = GetRd(mem_write_instr);
1547 reg_popped = GetRd(mem_read_instr);
1548 pc_ -= 3 * kInstrSize;
1549 emit(ldr_instr);
1550 mov(reg_popped, reg_pushed);
Steve Block1e0659c2011-05-24 12:43:12 +01001551 } else if ((Instruction::RdValue(mem_read_instr) !=
1552 Instruction::RdValue(ldr_instr)) ||
1553 (Instruction::RdValue(mem_write_instr) ==
1554 Instruction::RdValue(ldr_instr))) {
Leon Clarkef7060e22010-06-03 12:02:55 +01001555 reg_pushed = GetRd(mem_write_instr);
1556 reg_popped = GetRd(mem_read_instr);
1557 pc_ -= 3 * kInstrSize;
1558 mov(reg_popped, reg_pushed);
1559 emit(ldr_instr);
1560 }
1561 if (FLAG_print_peephole_optimization) {
1562 PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
1563 }
1564 }
1565 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001566 }
1567 }
1568}
1569
1570
1571void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1572 addrmod2(cond | B26, src, dst);
1573
1574 // Eliminate pattern: pop(), push(r)
1575 // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
1576 // -> str r, [sp, 0], al
Leon Clarkef7060e22010-06-03 12:02:55 +01001577 if (can_peephole_optimize(2) &&
Andrei Popescu31002712010-02-23 13:46:05 +00001578 // Pattern.
Steve Blocka7e24c12009-10-30 11:49:00 +00001579 instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
1580 instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
1581 pc_ -= 2 * kInstrSize;
1582 emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
Leon Clarkef7060e22010-06-03 12:02:55 +01001583 if (FLAG_print_peephole_optimization) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001584 PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
1585 }
1586 }
1587}
1588
1589
// Load an unsigned byte: dst = zero-extended MEM8(src) (B = byte, L = load).
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}
1593
1594
// Store the low byte of src: MEM8(dst) = src (B = byte, no L = store).
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}
1598
1599
// Load an unsigned halfword: dst = zero-extended MEM16(src), addressing mode 3.
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}
1603
1604
// Store the low halfword of src: MEM16(dst) = src, addressing mode 3.
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}
1608
1609
// Load a signed byte: dst = sign-extended MEM8(src) (S6 selects signed).
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}
1613
1614
// Load a signed halfword: dst = sign-extended MEM16(src) (S6 | H).
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1618
1619
// Load a doubleword into the register pair <dst1, dst2> (ARMv7 ldrd).
// The pair must be an even/odd pair (dst1 even, dst2 == dst1 + 1), the
// address may not use a register offset, and dst1 may not be lr.
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));  // Register-offset addressing not supported.
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1629
1630
// Store the register pair <src1, src2> as a doubleword (ARMv7 strd).
// Same pairing and addressing restrictions as ldrd above.
void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));  // Register-offset addressing not supported.
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
1640
Andrei Popescu31002712010-02-23 13:46:05 +00001641// Load/Store multiple instructions.
// Load multiple registers from memory (ldm), addressing mode 4.
// am:   block addressing mode (ia/ib/da/db with optional writeback).
// base: base address register.
// dst:  bit set of destination registers.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp  is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1661
1662
// Store multiple registers to memory (stm), addressing mode 4.
// src is the bit set of registers to store.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1669
1670
Andrei Popescu31002712010-02-23 13:46:05 +00001671// Exception-generating instructions and debugging support.
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001672// Stops with a non-negative code less than kNumOfWatchedStops support
1673// enabling/disabling and a counter feature. See simulator-arm.h .
// Emit a stop: on the simulator (host build) a watched svc + message pointer;
// on real ARM hardware a breakpoint (or an svc where bkpt is unavailable).
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  ASSERT(code >= kDefaultStopCode);
  // The Simulator will handle the stop instruction and get the message address.
  // It expects to find the address just after the svc instruction.
  BlockConstPoolFor(2);
  if (code >= 0) {
    svc(kStopCode + code, cond);
  } else {
    // Negative codes collapse onto the maximum (unwatched) stop code.
    svc(kStopCode + kMaxStopCode, cond);
  }
  // The message pointer is embedded in the instruction stream right after svc.
  emit(reinterpret_cast<Instr>(msg));
#else  // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  // bkpt is unconditional; synthesize conditionality with a branch around it.
  if (cond != al) {
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
  svc(0x9f0001, cond);
#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif  // def __arm__
}
1701
1702
// Emit a breakpoint with a 16-bit immediate (always unconditional: al).
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  // The immediate is split: top 12 bits at [19:8], low 4 bits at [3:0].
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
1707
1708
// Emit a supervisor call with a 24-bit immediate comment field.
void Assembler::svc(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1713
1714
Andrei Popescu31002712010-02-23 13:46:05 +00001715// Coprocessor instructions.
// Coprocessor data processing (cdp): ask coprocessor |coproc| to perform
// the operation identified by opcode_1/opcode_2 on crn/crm into crd.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
1727
1728
// Unconditional cdp (cdp2): same as cdp with the special 0b1111 condition.
void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
1737
1738
// Move from an ARM register to a coprocessor register (mcr).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1750
1751
// Unconditional mcr (mcr2): same as mcr with the special 0b1111 condition.
void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
1760
1761
// Move from a coprocessor register to an ARM register (mrc); the L bit
// distinguishes this from mcr.
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
1773
1774
// Unconditional mrc (mrc2): same as mrc with the special 0b1111 condition.
void Assembler::mrc2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
1783
1784
// Load coprocessor register crd from memory (ldc), addressing mode 5.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}
1792
1793
// Load coprocessor register crd using unindexed addressing: the 8-bit
// |option| field is passed through to the coprocessor uninterpreted.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1805
1806
// Unconditional ldc (ldc2), memory-operand form.
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& src,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, src, l, kSpecialCondition);
}
1813
1814
// Unconditional ldc (ldc2), unindexed form.
void Assembler::ldc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
1822
1823
// Store coprocessor register crd to memory (stc), addressing mode 5.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& dst,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
1831
1832
// Store coprocessor register crd using unindexed addressing; the 8-bit
// |option| field is passed through to the coprocessor uninterpreted.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
1844
1845
1846void Assembler::stc2(Coprocessor
1847 coproc, CRegister crd,
1848 const MemOperand& dst,
1849 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01001850 stc(coproc, crd, dst, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00001851}
1852
1853
// Unconditional stc (stc2), unindexed form.
void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     Register rn,
                     int option,
                     LFlag l) {  // v5 and above
  stc(coproc, crd, rn, option, l, kSpecialCondition);
}
1861
1862
Steve Blockd0582a62009-12-15 09:54:21 +00001863// Support for VFP.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001864
// Load a double-precision VFP register from [base + offset].
// Small 4-byte-aligned offsets are encoded directly; anything else is
// materialized into ip first (so base must not be ip in that case).
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  // U (add/subtract) bit; negative offsets are folded into it.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }

  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Offset fits the 8-bit word-scaled immediate field.
    emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
  }
}
1896
1897
// MemOperand convenience overload; only plain base+immediate-offset
// addressing is supported for VFP loads.
void Assembler::vldr(const DwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}
1905
1906
// Load a single-precision VFP register from [base + offset].
// Mirrors the double-precision vldr above; split_code() extracts the
// Vd:D encoding of the S register.
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  // U (add/subtract) bit; negative offsets are folded into it.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  ASSERT(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Offset fits the 8-bit word-scaled immediate field.
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
1940
1941
// MemOperand convenience overload for single-precision loads; only plain
// base+immediate-offset addressing is supported.
void Assembler::vldr(const SwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vldr(dst, operand.rn(), operand.offset(), cond);
}
1949
1950
// Store a double-precision VFP register to [base + offset].
// Small 4-byte-aligned offsets are encoded directly; anything else is
// materialized into ip first (so base must not be ip in that case).
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
  // Vsrc(15-12) | 1011(11-8) | (offset/4)
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  // U (add/subtract) bit; negative offsets are folded into it.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Offset fits the 8-bit word-scaled immediate field.
    emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
  }
}
1981
1982
// MemOperand convenience overload for double-precision stores; only plain
// base+immediate-offset addressing is supported.
void Assembler::vstr(const DwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  ASSERT(!operand.rm().is_valid());
  ASSERT(operand.am_ == Offset);
  vstr(src, operand.rn(), operand.offset(), cond);
}
1990
1991
// Store a single-precision VFP register to [base + offset].
// Mirrors the double-precision vstr above; split_code() extracts the
// Vd:D encoding of the S register.
void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | (offset/4)
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  // U (add/subtract) bit; negative offsets are folded into it.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  ASSERT(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // Offset fits the 8-bit word-scaled immediate field.
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    ASSERT(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
2024
2025
2026void Assembler::vstr(const SwVfpRegister src,
2027 const MemOperand& operand,
2028 const Condition cond) {
2029 ASSERT(!operand.rm().is_valid());
2030 ASSERT(operand.am_ == Offset);
2031 vldr(src, operand.rn(), operand.offset(), cond);
Iain Merrick75681382010-08-19 15:07:18 +01002032}
2033
2034
// Reinterpret the bit pattern of |d| as a 64-bit integer and return its
// low and high 32-bit halves through |lo| and |hi|.
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits = 0;
  memcpy(&bits, &d, sizeof(bits));
  *hi = static_cast<uint32_t>(bits >> 32);
  *lo = static_cast<uint32_t>(bits & 0xffffffff);
}
2042
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
// Returns true (and fills in *encoding) if |d| can be represented as a
// VFP vmov 8-bit immediate of the form +/- m * 2^(-n), 16 <= m <= 31,
// 0 <= n <= 7.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
  ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two
  // 4-bit fields. For an 8-bit immediate of the form:
  //
  //  [abcdefgh]
  //
  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
  // created of the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //   00000000,00000000,00000000,00000000]
  //
  // where B = ~b.
  //

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // The replicated exponent bits (double bits 61:54, i.e. hi bits 29:22)
  // must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // The inverted bit B (double bit 62) must be NOT the replicated bit
  // below it (double bit 61); the XOR-with-shift compares adjacent bits.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding = (hi >> 16) & 0xf;       // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
2092
2093
// Load an immediate double into Dd. Uses the single-instruction vmov
// immediate form when the value fits; otherwise synthesizes the value
// through the ip scratch register, one 32-bit half at a time.
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Condition cond) {
  // Dd = immediate
  // Instruction details available in ARM DDI 0406B, A8-640.
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));

  uint32_t enc;
  if (FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
  } else {
    // Synthesise the double from ARM immediates. This could be implemented
    // using vldr from a constant pool.
    // NOTE(review): the mov() calls and the two-register vmov() below do
    // not forward |cond| — confirm callers only use the default condition.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (lo == hi) {
      // If the lo and hi parts of the double are equal, the literal is easier
      // to create. This is the case with 0.0.
      mov(ip, Operand(lo));
      vmov(dst, ip, ip);
    } else {
      // Move the low part of the double into the lower of the corresponding S
      // registers of D register dst.
      mov(ip, Operand(lo));
      vmov(dst.low(), ip, cond);

      // Move the high part of the double into the higher of the corresponding
      // S registers of D register dst.
      mov(ip, Operand(hi));
      vmov(dst.high(), ip, cond);
    }
  }
}
2129
2130
// Register-to-register move between single-precision VFP registers.
void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
2142
2143
// Register-to-register move between double-precision VFP registers.
void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Instruction details available in ARM DDI 0406B, A8-642.
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xB*B20 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
2153
2154
// Move a pair of core registers into a double-precision VFP register.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
2168
2169
// Move a double-precision VFP register into a pair of core registers.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
2183
2184
// Move a core register into a single-precision VFP register.
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2198
2199
// Move a single-precision VFP register into a core register.
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2213
2214
// Type of data to read from or write to a VFP register, used as the
// conversion-type specifier by the generic vcvt encoding helpers below.
enum VFPType { S32, U32, F32, F64 };
2218
2219
2220static bool IsSignedVFPType(VFPType type) {
2221 switch (type) {
2222 case S32:
2223 return true;
2224 case U32:
2225 return false;
2226 default:
2227 UNREACHABLE();
2228 return false;
2229 }
Steve Blockd0582a62009-12-15 09:54:21 +00002230}
2231
2232
Steve Block6ded16b2010-05-10 14:33:55 +01002233static bool IsIntegerVFPType(VFPType type) {
2234 switch (type) {
2235 case S32:
2236 case U32:
2237 return true;
2238 case F32:
2239 case F64:
2240 return false;
2241 default:
2242 UNREACHABLE();
2243 return false;
2244 }
2245}
2246
2247
2248static bool IsDoubleVFPType(VFPType type) {
2249 switch (type) {
2250 case F32:
2251 return false;
2252 case F64:
2253 return true;
2254 default:
2255 UNREACHABLE();
2256 return false;
2257 }
2258}
2259
2260
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002261// Split five bit reg_code based on size of reg_type.
2262// 32-bit register codes are Vm:M
2263// 64-bit register codes are M:Vm
2264// where Vm is four bits, and M is a single bit.
2265static void SplitRegCode(VFPType reg_type,
Steve Block6ded16b2010-05-10 14:33:55 +01002266 int reg_code,
2267 int* vm,
2268 int* m) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002269 ASSERT((reg_code >= 0) && (reg_code <= 31));
2270 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2271 // 32 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002272 *m = reg_code & 0x1;
2273 *vm = reg_code >> 1;
2274 } else {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002275 // 64 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002276 *m = (reg_code & 0x10) >> 4;
2277 *vm = reg_code & 0x0F;
2278 }
2279}
2280
2281
// Encode a vcvt.dst_type.src_type instruction converting |src_code| into
// |dst_code|. Handles both int<->float conversions and the
// double<->single precision conversion; src and dst types must differ.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // float -> int: opc2 selects signedness, op is the rounding mode.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // int -> float: op selects signedness of the source.
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
2326
2327
// Convert a signed 32-bit integer (in an S register) to a double.
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
2335
2336
// Convert a signed 32-bit integer (in an S register) to a float.
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
2344
2345
// Convert an unsigned 32-bit integer (in an S register) to a double.
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
2353
2354
// Convert a double to a signed 32-bit integer (into an S register).
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
2362
2363
2364void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2365 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002366 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002367 const Condition cond) {
Steve Block44f0eee2011-05-26 01:26:41 +01002368 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002369 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002370}
2371
2372
2373void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2374 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002375 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002376 const Condition cond) {
Steve Block44f0eee2011-05-26 01:26:41 +01002377 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002378 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002379}
2380
2381
2382void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2383 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002384 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002385 const Condition cond) {
Steve Block44f0eee2011-05-26 01:26:41 +01002386 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002387 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
Steve Blockd0582a62009-12-15 09:54:21 +00002388}
2389
2390
Steve Block44f0eee2011-05-26 01:26:41 +01002391void Assembler::vneg(const DwVfpRegister dst,
2392 const DwVfpRegister src,
2393 const Condition cond) {
2394 emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
2395 0x5*B9 | B8 | B6 | src.code());
2396}
2397
2398
Steve Block1e0659c2011-05-24 12:43:12 +01002399void Assembler::vabs(const DwVfpRegister dst,
2400 const DwVfpRegister src,
2401 const Condition cond) {
2402 emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
2403 0x5*B9 | B8 | 0x3*B6 | src.code());
2404}
2405
2406
Leon Clarkee46be812010-01-19 14:06:41 +00002407void Assembler::vadd(const DwVfpRegister dst,
2408 const DwVfpRegister src1,
2409 const DwVfpRegister src2,
2410 const Condition cond) {
2411 // Dd = vadd(Dn, Dm) double precision floating point addition.
Steve Blockd0582a62009-12-15 09:54:21 +00002412 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2413 // Instruction details available in ARM DDI 0406A, A8-536.
2414 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2415 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002416 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002417 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2418 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2419}
2420
2421
Leon Clarkee46be812010-01-19 14:06:41 +00002422void Assembler::vsub(const DwVfpRegister dst,
2423 const DwVfpRegister src1,
2424 const DwVfpRegister src2,
2425 const Condition cond) {
2426 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
Steve Blockd0582a62009-12-15 09:54:21 +00002427 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2428 // Instruction details available in ARM DDI 0406A, A8-784.
2429 // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
2430 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002431 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002432 emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
2433 dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
2434}
2435
2436
Leon Clarkee46be812010-01-19 14:06:41 +00002437void Assembler::vmul(const DwVfpRegister dst,
2438 const DwVfpRegister src1,
2439 const DwVfpRegister src2,
2440 const Condition cond) {
2441 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
Steve Blockd0582a62009-12-15 09:54:21 +00002442 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2443 // Instruction details available in ARM DDI 0406A, A8-784.
2444 // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
2445 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002446 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002447 emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
2448 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2449}
2450
2451
Leon Clarkee46be812010-01-19 14:06:41 +00002452void Assembler::vdiv(const DwVfpRegister dst,
2453 const DwVfpRegister src1,
2454 const DwVfpRegister src2,
2455 const Condition cond) {
2456 // Dd = vdiv(Dn, Dm) double precision floating point division.
Steve Blockd0582a62009-12-15 09:54:21 +00002457 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
2458 // Instruction details available in ARM DDI 0406A, A8-584.
2459 // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
2460 // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002461 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002462 emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
2463 dst.code()*B12 | 0x5*B9 | B8 | src2.code());
2464}
2465
2466
Leon Clarkee46be812010-01-19 14:06:41 +00002467void Assembler::vcmp(const DwVfpRegister src1,
2468 const DwVfpRegister src2,
Steve Blockd0582a62009-12-15 09:54:21 +00002469 const Condition cond) {
2470 // vcmp(Dd, Dm) double precision floating point comparison.
2471 // Instruction details available in ARM DDI 0406A, A8-570.
2472 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002473 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002474 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002475 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002476 src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
Steve Blockd0582a62009-12-15 09:54:21 +00002477}
2478
2479
Iain Merrick75681382010-08-19 15:07:18 +01002480void Assembler::vcmp(const DwVfpRegister src1,
2481 const double src2,
Iain Merrick75681382010-08-19 15:07:18 +01002482 const Condition cond) {
2483 // vcmp(Dd, Dm) double precision floating point comparison.
2484 // Instruction details available in ARM DDI 0406A, A8-570.
2485 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002486 // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002487 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Iain Merrick75681382010-08-19 15:07:18 +01002488 ASSERT(src2 == 0.0);
2489 emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
Ben Murdochb8e0da22011-05-16 14:20:40 +01002490 src1.code()*B12 | 0x5*B9 | B8 | B6);
Iain Merrick75681382010-08-19 15:07:18 +01002491}
2492
2493
Russell Brenner90bac252010-11-18 13:33:46 -08002494void Assembler::vmsr(Register dst, Condition cond) {
2495 // Instruction details available in ARM DDI 0406A, A8-652.
2496 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
2497 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002498 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Russell Brenner90bac252010-11-18 13:33:46 -08002499 emit(cond | 0xE*B24 | 0xE*B20 | B16 |
2500 dst.code()*B12 | 0xA*B8 | B4);
2501}
2502
2503
Steve Blockd0582a62009-12-15 09:54:21 +00002504void Assembler::vmrs(Register dst, Condition cond) {
2505 // Instruction details available in ARM DDI 0406A, A8-652.
2506 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
2507 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002508 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Blockd0582a62009-12-15 09:54:21 +00002509 emit(cond | 0xE*B24 | 0xF*B20 | B16 |
2510 dst.code()*B12 | 0xA*B8 | B4);
2511}
2512
2513
Steve Block8defd9f2010-07-08 12:39:36 +01002514void Assembler::vsqrt(const DwVfpRegister dst,
2515 const DwVfpRegister src,
2516 const Condition cond) {
2517 // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
2518 // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
Steve Block44f0eee2011-05-26 01:26:41 +01002519 ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
Steve Block8defd9f2010-07-08 12:39:36 +01002520 emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
2521 dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
2522}
2523
2524
Andrei Popescu31002712010-02-23 13:46:05 +00002525// Pseudo instructions.
Steve Block6ded16b2010-05-10 14:33:55 +01002526void Assembler::nop(int type) {
2527 // This is mov rx, rx.
2528 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2529 emit(al | 13*B21 | type*B12 | type);
2530}
2531
2532
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002533bool Assembler::IsNop(Instr instr, int type) {
Steve Block1e0659c2011-05-24 12:43:12 +01002534 // Check for mov rx, rx where x = type.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002535 ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
2536 return instr == (al | 13*B21 | type*B12 | type);
2537}
2538
2539
Steve Blockd0582a62009-12-15 09:54:21 +00002540bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
2541 uint32_t dummy1;
2542 uint32_t dummy2;
2543 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
2544}
2545
2546
2547void Assembler::BlockConstPoolFor(int instructions) {
2548 BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
2549}
2550
2551
Andrei Popescu31002712010-02-23 13:46:05 +00002552// Debugging.
Steve Blocka7e24c12009-10-30 11:49:00 +00002553void Assembler::RecordJSReturn() {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002554 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00002555 CheckBuffer();
2556 RecordRelocInfo(RelocInfo::JS_RETURN);
2557}
2558
2559
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002560void Assembler::RecordDebugBreakSlot() {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002561 positions_recorder()->WriteRecordedPositions();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002562 CheckBuffer();
2563 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2564}
2565
2566
Steve Blocka7e24c12009-10-30 11:49:00 +00002567void Assembler::RecordComment(const char* msg) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01002568 if (FLAG_code_comments) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002569 CheckBuffer();
2570 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2571 }
2572}
2573
2574
Steve Blocka7e24c12009-10-30 11:49:00 +00002575void Assembler::GrowBuffer() {
2576 if (!own_buffer_) FATAL("external code buffer is too small");
2577
Andrei Popescu31002712010-02-23 13:46:05 +00002578 // Compute new buffer size.
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 CodeDesc desc; // the new buffer
2580 if (buffer_size_ < 4*KB) {
2581 desc.buffer_size = 4*KB;
2582 } else if (buffer_size_ < 1*MB) {
2583 desc.buffer_size = 2*buffer_size_;
2584 } else {
2585 desc.buffer_size = buffer_size_ + 1*MB;
2586 }
2587 CHECK_GT(desc.buffer_size, 0); // no overflow
2588
Andrei Popescu31002712010-02-23 13:46:05 +00002589 // Setup new buffer.
Steve Blocka7e24c12009-10-30 11:49:00 +00002590 desc.buffer = NewArray<byte>(desc.buffer_size);
2591
2592 desc.instr_size = pc_offset();
2593 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2594
Andrei Popescu31002712010-02-23 13:46:05 +00002595 // Copy the data.
Steve Blocka7e24c12009-10-30 11:49:00 +00002596 int pc_delta = desc.buffer - buffer_;
2597 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2598 memmove(desc.buffer, buffer_, desc.instr_size);
2599 memmove(reloc_info_writer.pos() + rc_delta,
2600 reloc_info_writer.pos(), desc.reloc_size);
2601
Andrei Popescu31002712010-02-23 13:46:05 +00002602 // Switch buffers.
Steve Blocka7e24c12009-10-30 11:49:00 +00002603 DeleteArray(buffer_);
2604 buffer_ = desc.buffer;
2605 buffer_size_ = desc.buffer_size;
2606 pc_ += pc_delta;
2607 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2608 reloc_info_writer.last_pc() + pc_delta);
2609
Andrei Popescu31002712010-02-23 13:46:05 +00002610 // None of our relocation types are pc relative pointing outside the code
Steve Blocka7e24c12009-10-30 11:49:00 +00002611 // buffer nor pc absolute pointing inside the code buffer, so there is no need
Andrei Popescu31002712010-02-23 13:46:05 +00002612 // to relocate any emitted relocation entries.
Steve Blocka7e24c12009-10-30 11:49:00 +00002613
Andrei Popescu31002712010-02-23 13:46:05 +00002614 // Relocate pending relocation entries.
Steve Blocka7e24c12009-10-30 11:49:00 +00002615 for (int i = 0; i < num_prinfo_; i++) {
2616 RelocInfo& rinfo = prinfo_[i];
2617 ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
2618 rinfo.rmode() != RelocInfo::POSITION);
2619 if (rinfo.rmode() != RelocInfo::JS_RETURN) {
2620 rinfo.set_pc(rinfo.pc() + pc_delta);
2621 }
2622 }
2623}
2624
2625
Ben Murdochb0fe1622011-05-05 13:52:32 +01002626void Assembler::db(uint8_t data) {
Ben Murdochb8e0da22011-05-16 14:20:40 +01002627 // No relocation info should be pending while using db. db is used
2628 // to write pure data with no pointers and the constant pool should
2629 // be emitted before using db.
2630 ASSERT(num_prinfo_ == 0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002631 CheckBuffer();
2632 *reinterpret_cast<uint8_t*>(pc_) = data;
2633 pc_ += sizeof(uint8_t);
2634}
2635
2636
// Writes a 32-bit word of raw data into the instruction stream.
void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  ASSERT(num_prinfo_ == 0);
  CheckBuffer();  // Make sure there is room for the data.
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
2646
2647
Steve Blocka7e24c12009-10-30 11:49:00 +00002648void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2649 RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002650 if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
Andrei Popescu31002712010-02-23 13:46:05 +00002651 // Adjust code for new modes.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002652 ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2653 || RelocInfo::IsJSReturn(rmode)
Steve Blocka7e24c12009-10-30 11:49:00 +00002654 || RelocInfo::IsComment(rmode)
2655 || RelocInfo::IsPosition(rmode));
Andrei Popescu31002712010-02-23 13:46:05 +00002656 // These modes do not need an entry in the constant pool.
Steve Blocka7e24c12009-10-30 11:49:00 +00002657 } else {
2658 ASSERT(num_prinfo_ < kMaxNumPRInfo);
2659 prinfo_[num_prinfo_++] = rinfo;
2660 // Make sure the constant pool is not emitted in place of the next
Andrei Popescu31002712010-02-23 13:46:05 +00002661 // instruction for which we just recorded relocation info.
Steve Blocka7e24c12009-10-30 11:49:00 +00002662 BlockConstPoolBefore(pc_offset() + kInstrSize);
2663 }
2664 if (rinfo.rmode() != RelocInfo::NONE) {
2665 // Don't record external references unless the heap will be serialized.
Steve Blockd0582a62009-12-15 09:54:21 +00002666 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2667#ifdef DEBUG
2668 if (!Serializer::enabled()) {
2669 Serializer::TooLateToEnableNow();
2670 }
2671#endif
Steve Block44f0eee2011-05-26 01:26:41 +01002672 if (!Serializer::enabled() && !emit_debug_code()) {
Steve Blockd0582a62009-12-15 09:54:21 +00002673 return;
2674 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002675 }
2676 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2677 reloc_info_writer.Write(&rinfo);
2678 }
2679}
2680
2681
// Emits the pending constant pool entries if a pool is due (or force_emit
// is set). require_jump indicates execution can reach this point, so a
// branch over the emitted pool is needed. Every queued pc-relative ldr/str
// is back-patched with the offset of its pool entry.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
  // both checked here. Also, recursive calls to CheckConstPool are blocked by
  // no_const_pool_before_.
  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (const_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_const_pool_before_;
    }

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A5.6 (ARMv7) Instruction set encoding.
  emit(kConstantPoolMarker | num_prinfo_);

  // Emit constant pool entries.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // In ARM state the pc reads as the instruction's address plus 8,
    // hence the -8 bias when computing the load offset.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      instr &= ~U;  // Negative offset: clear the up bit and negate delta.
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
2788
2789
2790} } // namespace v8::internal
Leon Clarkef7060e22010-06-03 12:02:55 +01002791
2792#endif // V8_TARGET_ARCH_ARM