// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/assembler-arm-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
#endif  // CAN_USE_VFP32DREGS
#ifdef CAN_USE_NEON
  if (FLAG_enable_neon) answer |= 1u << NEON;
#endif  // CAN_USE_NEON
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}

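// Probe which CPU features are available to generated code. For a cross
// compile (snapshot build) only the statically determined features are used;
// the simulator build takes the features from the flags, and a native ARM
// build additionally queries base::CPU at runtime.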
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  cache_line_size_ = 64;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifndef __arm__
  // For the simulator build, use whatever the flags specify.
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
    if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
    if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
    if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
  }
  if (FLAG_enable_mls) supported_ |= 1u << MLS;
  if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;

#else  // __arm__
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
  if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;

  if (cpu.architecture() >= 7) {
    if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
    if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
      supported_ |= 1u << ARMv8;
    }
    if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
    // Use movw/movt for QUALCOMM ARMv7 cores.
    if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
      supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    }
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == base::CPU::ARM &&
      (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
       cpu.part() == base::CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;

  if (cpu.implementer() == base::CPU::NVIDIA &&
      cpu.variant() == base::CPU::NVIDIA_DENVER) {
    supported_ |= 1u << COHERENT_CACHE;
  }
#endif

  DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
}

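// Print a one-line summary of the compile-time ARM target: simulator or
// hardware, architecture version, FPU variant, thumb mode and float ABI.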
void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_target_type = "";
  const char* arm_no_probe = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if !defined __arm__
  arm_target_type = " simulator";
#endif

#if defined ARM_TEST_NO_FEATURE_PROBE
  arm_no_probe = " noprobe";
#endif

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#if defined CAN_USE_NEON
  arm_fpu = " neon";
#elif defined CAN_USE_VFP3_INSTRUCTIONS
# if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp3-d16";
# endif
#else
  arm_fpu = " vfp2";
#endif

#ifdef __arm__
  arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
#elif USE_EABI_HARDFLOAT
  arm_float_abi = "hard";
#else
  arm_float_abi = "softfp";
#endif

#if defined __arm__ && (defined __thumb__ || defined __thumb2__)
  arm_thumb = " thumb";
#endif

  printf("target%s%s %s%s%s %s\n",
         arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
         arm_float_abi);
}

void CpuFeatures::PrintFeatures() {
  printf(
      "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
      "MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
      CpuFeatures::IsSupported(ARMv7),
      CpuFeatures::IsSupported(VFP3),
      CpuFeatures::IsSupported(VFP32DREGS),
      CpuFeatures::IsSupported(NEON),
      CpuFeatures::IsSupported(SUDIV),
      CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
      CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
      CpuFeatures::IsSupported(COHERENT_CACHE));
#ifdef __arm__
  bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
  printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}


// -----------------------------------------------------------------------------
// Implementation of DwVfpRegister

const char* DwVfpRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < NumAllocatableRegisters());
  DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
         kNumReservedRegisters - 1);
  if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
  return VFPRegisters::Name(index, true);
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on ARM means that it is a movw/movt instruction, or is an
  // out of line constant pool entry.  These only occur if
  // FLAG_enable_ool_constant_pool is true.
  return FLAG_enable_ool_constant_pool;
}


bool RelocInfo::IsInConstantPool() {
  return Assembler::is_constant_pool_load(pc_);
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  DCHECK(is_uint5(shift_imm));

  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;

  if ((shift_op == ROR) && (shift_imm == 0)) {
    // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
    // RRX as ROR #0 (See below).
    shift_op = LSL;
  } else if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    DCHECK(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  DCHECK(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  DCHECK(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}

NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  DCHECK((am == Offset) || (am == PostIndex));
  rn_ = rn;
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}


void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:
      align_ = 0;
      break;
    case 64:
      align_ = 1;
      break;
    case 128:
      align_ = 2;
      break;
    case 256:
      align_ = 3;
      break;
    default:
      UNREACHABLE();
      align_ = 0;
      break;
  }
}


NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  switch (registers_count) {
    case 1:
      type_ = nlt_1;
      break;
    case 2:
      type_ = nlt_2;
      break;
    case 3:
      type_ = nlt_3;
      break;
    case 4:
      type_ = nlt_4;
      break;
    default:
      UNREACHABLE();
      type_ = nlt_1;
      break;
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// ldr rd, [pp, #offset]
const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// ldr rd, [pp, rn]
const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  ClearRecordedAstId();
}


Assembler::~Assembler() {
  DCHECK(const_pool_blocked_nesting_ == 0);
}


void Assembler::GetCode(CodeDesc* desc) {
  if (!FLAG_enable_ool_constant_pool) {
    // Emit constant pool if necessary.
    CheckConstPool(true, false);
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}


bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes.
  return ((instr & kImm24Mask) << 8) >> 6;
}


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}

bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  offset <<= 2;
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  DCHECK((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsAddRegisterImmediate(instr));
  DCHECK(offset >= 0);
  DCHECK(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}

Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}


Instr Assembler::GetConsantPoolLoadPattern() {
  if (FLAG_enable_ool_constant_pool) {
    return kLdrPpImmedPattern;
  } else {
    return kLdrPCImmedPattern;
  }
}


Instr Assembler::GetConsantPoolLoadMask() {
  if (FLAG_enable_ool_constant_pool) {
    return kLdrPpImmedMask;
  } else {
    return kLdrPCImmedMask;
  }
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}

bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
}


bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp +/- offset_12].
  return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
}


bool Assembler::IsLdrPpRegOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp, +/- <Rm>].
  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
}


Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}


bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;
}


bool Assembler::IsBlxReg(Instr instr) {
  // Check the instruction is indeed a
  // blxcc <Rm>
  return (instr & kBlxRegMask) == kBlxRegPattern;
}


bool Assembler::IsBlxIp(Instr instr) {
  // Check the instruction is indeed a
  // blx ip
  return instr == kBlxIp;
}

bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRn(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The linked labels form a link chain by making the branch offset
// in the instruction stream point to the previous branch
// instruction using the same label.
//
// The link chain is terminated by a branch offset pointing to the
// same position.

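// Returns the branch target position encoded in the instruction at pos. A raw
// (uint24) value is an emitted link to a label and is returned unchanged;
// otherwise the 24-bit branch offset is decoded and applied relative to pos.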
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}

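// Patches the instruction(s) at pos so that they refer to target_pos: either
// the branch offset is rewritten, or, for an emitted label link, the
// placeholder is replaced by a mov/movw+movt (or mov/orr) sequence that loads
// the label's position relative to the code object.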
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    DCHECK(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    DCHECK(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    DCHECK((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    DCHECK((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  DCHECK(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}

void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
980 // Branch target points to the same instuction. This is the end of the link
    // chain.
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
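// For example, 0x0000ff00 can be encoded as immed_8 == 0xff with
// rotate_imm == 12 (an 8-bit value rotated right by 24 bits).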
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= Assembler::EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}


// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return assembler->serializer_enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}

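// Decides whether a 32-bit immediate should be synthesized with a movw/movt
// (or mov/orr) sequence instead of being loaded from the constant pool.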
static bool use_mov_immediate_load(const Operand& x,
                                   const Assembler* assembler) {
  if (FLAG_enable_ool_constant_pool && assembler != NULL &&
      !assembler->is_ool_constant_pool_available()) {
    return true;
  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
             (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
    return true;
  } else if (x.must_output_reloc_info(assembler)) {
    // Prefer constant pool if data is likely to be patched.
    return false;
  } else {
    // Otherwise, use immediate load if movw / movt is available.
    return CpuFeatures::IsSupported(ARMv7);
  }
}

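// Returns the number of instructions needed to materialize this operand,
// accounting for the immediate load (movw/movt, mov/orr or constant pool
// load) plus, when necessary, the instruction that consumes the operand.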
int Operand::instructions_required(const Assembler* assembler,
                                   Instr instr) const {
  if (rm_.is_valid()) return 1;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required.  First account for the instructions required
    // for the constant pool or immediate load
    int instructions;
    if (use_mov_immediate_load(*this, assembler)) {
      // A movw / movt or mov / orr immediate load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
    } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
      // An extended constant pool load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
    } else {
      // A small constant pool load.
      instructions = 1;
    }

    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
      // For a mov or mvn instruction which doesn't set the condition
      // code, the constant pool or immediate load is enough, otherwise we need
      // to account for the actual instruction being requested.
      instructions += 1;
    }
    return instructions;
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return 1;
  }
}

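// Loads a full 32-bit immediate into rd, either with a movw/movt (or
// mov/orr/orr/orr) sequence or via a constant pool entry, recording
// relocation information when required.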
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  if (use_mov_immediate_load(x, this)) {
    Register target = rd.code() == pc.code() ? ip : rd;
    if (CpuFeatures::IsSupported(ARMv7)) {
      if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      movw(target, imm32 & 0xffff, cond);
      movt(target, imm32 >> 16, cond);
    } else {
      DCHECK(FLAG_enable_ool_constant_pool);
      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
    }
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
    if (section == ConstantPoolArray::EXTENDED_SECTION) {
      DCHECK(FLAG_enable_ool_constant_pool);
      Register target = rd.code() == pc.code() ? ip : rd;
      // Emit instructions to load constant pool offset.
      if (CpuFeatures::IsSupported(ARMv7)) {
        movw(target, 0, cond);
        movt(target, 0, cond);
      } else {
        mov(target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
      }
      // Load from constant pool at offset.
      ldr(rd, MemOperand(pp, target), cond);
    } else {
      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
    }
  }
}

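// Emits a data-processing instruction (ARM addressing mode 1). If the
// immediate operand cannot be encoded as a shifter operand, it is first
// loaded into ip (or directly into rd for a plain mov) and the instruction
// is re-emitted with a register operand.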
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}


void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset; the constructors make sure that both shift_imm_
    // and shift_op_ are initialized.
    DCHECK(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  DCHECK(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    DCHECK((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
  DCHECK(rl != 0);
  DCHECK(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  DCHECK_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  DCHECK((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  DCHECK(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  DCHECK(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}

1321int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1322 int target_pos;
1323 if (L->is_bound()) {
1324 target_pos = L->pos();
1325 } else {
1326 if (L->is_linked()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001327 // Point to previous instruction that uses the link.
1328 target_pos = L->pos();
Steve Blocka7e24c12009-10-30 11:49:00 +00001329 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001330 // First entry of the link chain points to itself.
1331 target_pos = pc_offset();
Steve Blocka7e24c12009-10-30 11:49:00 +00001332 }
1333 L->link_to(pc_offset());
1334 }
1335
1336 // Block the emission of the constant pool, since the branch instruction must
Andrei Popescu31002712010-02-23 13:46:05 +00001337 // be emitted at the pc offset recorded by the label.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001338 BlockConstPoolFor(1);
Steve Blocka7e24c12009-10-30 11:49:00 +00001339 return target_pos - (pc_offset() + kPcLoadDelta);
1340}
1341
1342
Andrei Popescu31002712010-02-23 13:46:05 +00001343// Branch instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001344void Assembler::b(int branch_offset, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001345 DCHECK((branch_offset & 3) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001346 int imm24 = branch_offset >> 2;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001347 CHECK(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001348 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001349
Steve Block6ded16b2010-05-10 14:33:55 +01001350 if (cond == al) {
Andrei Popescu31002712010-02-23 13:46:05 +00001351 // Dead code is a good location to emit the constant pool.
Steve Blocka7e24c12009-10-30 11:49:00 +00001352 CheckConstPool(false, false);
Steve Block6ded16b2010-05-10 14:33:55 +01001353 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001354}
1355
1356
1357void Assembler::bl(int branch_offset, Condition cond) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001358 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001359 DCHECK((branch_offset & 3) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001360 int imm24 = branch_offset >> 2;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001361 CHECK(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001362 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001363}
1364
1365
1366void Assembler::blx(int branch_offset) { // v5 and above
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001367 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001368 DCHECK((branch_offset & 1) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001369 int h = ((branch_offset & 2) >> 1)*B24;
1370 int imm24 = branch_offset >> 2;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001371 CHECK(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001372 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001373}
1374
1375
1376void Assembler::blx(Register target, Condition cond) { // v5 and above
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001377 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001378 DCHECK(!target.is(pc));
Steve Block1e0659c2011-05-24 12:43:12 +01001379 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001380}
1381
1382
1383void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001384 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001385 DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
Steve Block1e0659c2011-05-24 12:43:12 +01001386 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001387}
1388
1389
Andrei Popescu31002712010-02-23 13:46:05 +00001390// Data-processing instructions.
1391
Steve Blocka7e24c12009-10-30 11:49:00 +00001392void Assembler::and_(Register dst, Register src1, const Operand& src2,
1393 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001394 addrmod1(cond | AND | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001395}
1396
1397
1398void Assembler::eor(Register dst, Register src1, const Operand& src2,
1399 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001400 addrmod1(cond | EOR | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001401}
1402
1403
1404void Assembler::sub(Register dst, Register src1, const Operand& src2,
1405 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001406 addrmod1(cond | SUB | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001407}
1408
1409
1410void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1411 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001412 addrmod1(cond | RSB | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001413}
1414
1415
1416void Assembler::add(Register dst, Register src1, const Operand& src2,
1417 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001418 addrmod1(cond | ADD | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001419}
1420
1421
1422void Assembler::adc(Register dst, Register src1, const Operand& src2,
1423 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001424 addrmod1(cond | ADC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001425}
1426
1427
1428void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1429 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001430 addrmod1(cond | SBC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001431}
1432
1433
1434void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1435 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001436 addrmod1(cond | RSC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001437}
1438
1439
1440void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001441 addrmod1(cond | TST | S, src1, r0, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001442}
1443
1444
1445void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001446 addrmod1(cond | TEQ | S, src1, r0, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001447}
1448
1449
1450void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001451 addrmod1(cond | CMP | S, src1, r0, src2);
1452}
1453
1454
1455void Assembler::cmp_raw_immediate(
1456 Register src, int raw_immediate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001457 DCHECK(is_uint12(raw_immediate));
Steve Block1e0659c2011-05-24 12:43:12 +01001458 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
Steve Blocka7e24c12009-10-30 11:49:00 +00001459}
1460
1461
1462void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001463 addrmod1(cond | CMN | S, src1, r0, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001464}
1465
1466
1467void Assembler::orr(Register dst, Register src1, const Operand& src2,
1468 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001469 addrmod1(cond | ORR | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001470}
1471
1472
1473void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1474 if (dst.is(pc)) {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001475 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00001476 }
Steve Block6ded16b2010-05-10 14:33:55 +01001477 // Don't allow nop instructions in the form mov rn, rn to be generated using
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001478 // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1479 // or MarkCode(int/NopMarkerTypes) pseudo instructions.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001480 DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
Steve Block1e0659c2011-05-24 12:43:12 +01001481 addrmod1(cond | MOV | s, r0, dst, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001482}
1483
1484
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001485void Assembler::mov_label_offset(Register dst, Label* label) {
1486 if (label->is_bound()) {
1487 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
1488 } else {
1489 // Emit the link to the label in the code stream followed by extra nop
1490 // instructions.
1491 // If the label is not linked, then start a new link chain by linking it to
1492 // itself, emitting pc_offset().
1493 int link = label->is_linked() ? label->pos() : pc_offset();
1494 label->link_to(pc_offset());
1495
1496 // When the label is bound, these instructions will be patched with a
1497 // sequence of movw/movt or mov/orr/orr instructions. They will load the
1498 // destination register with the position of the label from the beginning
1499 // of the code.
1500 //
1501 // The link will be extracted from the first instruction and the destination
1502 // register from the second.
1503 // For ARMv7:
1504 // link
1505 // mov dst, dst
1506 // For ARMv6:
1507 // link
1508 // mov dst, dst
1509 // mov dst, dst
1510 //
1511 // When the label gets bound: target_at extracts the link and target_at_put
1512 // patches the instructions.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001513 CHECK(is_uint24(link));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001514 BlockConstPoolScope block_const_pool(this);
1515 emit(link);
1516 nop(dst.code());
1517 if (!CpuFeatures::IsSupported(ARMv7)) {
1518 nop(dst.code());
1519 }
1520 }
1521}
1522
1523
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001524void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001525 DCHECK(CpuFeatures::IsSupported(ARMv7));
1526 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001527}
1528
1529
1530void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001531 DCHECK(CpuFeatures::IsSupported(ARMv7));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001532 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1533}
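
// Illustrative sketch (assumes ARMv7; not from the original source): a full
// 32-bit immediate can be materialized with a movw/movt pair, e.g. to place
// 0xDEADBEEF in r0:
//
//   movw(r0, 0xBEEF);   // r0 = 0x0000BEEF, upper halfword cleared
//   movt(r0, 0xDEAD);   // r0 = 0xDEADBEEF, lower halfword preserved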
1534
1535
Steve Blocka7e24c12009-10-30 11:49:00 +00001536void Assembler::bic(Register dst, Register src1, const Operand& src2,
1537 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001538 addrmod1(cond | BIC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001539}
1540
1541
1542void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001543 addrmod1(cond | MVN | s, r0, dst, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001544}
1545
1546
Andrei Popescu31002712010-02-23 13:46:05 +00001547// Multiply instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001548void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1549 SBit s, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001550 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001551 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1552 src2.code()*B8 | B7 | B4 | src1.code());
1553}
1554
1555
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001556void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1557 Condition cond) {
1558 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1559 DCHECK(IsEnabled(MLS));
1560 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1561 src2.code()*B8 | B7 | B4 | src1.code());
1562}
1563
1564
1565void Assembler::sdiv(Register dst, Register src1, Register src2,
1566 Condition cond) {
1567 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1568 DCHECK(IsEnabled(SUDIV));
1569 emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1570 src2.code()*B8 | B4 | src1.code());
1571}
1572
1573
1574void Assembler::udiv(Register dst, Register src1, Register src2,
1575 Condition cond) {
1576 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1577 DCHECK(IsEnabled(SUDIV));
1578 emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
1579 src2.code() * B8 | B4 | src1.code());
1580}
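
// Illustrative sketch (assumes SUDIV and MLS support; register choice is
// arbitrary, not from the original source): a signed remainder r0 = r1 % r2
// can be formed by combining sdiv with mls:
//
//   sdiv(r0, r1, r2);      // r0 = r1 / r2, truncated towards zero
//   mls(r0, r0, r2, r1);   // r0 = r1 - r0 * r2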
1581
1582
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001583void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
1584 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001585 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001586 // dst goes in bits 16-19 for this instruction!
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001587 emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
1588}
1589
1590
1591void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
1592 Condition cond) {
1593 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1594 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
1595 srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
1596}
1597
1598
1599void Assembler::smmul(Register dst, Register src1, Register src2,
1600 Condition cond) {
1601 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1602 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
1603 src2.code() * B8 | B4 | src1.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001604}
1605
1606
1607void Assembler::smlal(Register dstL,
1608 Register dstH,
1609 Register src1,
1610 Register src2,
1611 SBit s,
1612 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001613 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1614 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001615 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1616 src2.code()*B8 | B7 | B4 | src1.code());
1617}
1618
1619
1620void Assembler::smull(Register dstL,
1621 Register dstH,
1622 Register src1,
1623 Register src2,
1624 SBit s,
1625 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001626 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1627 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001628 emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1629 src2.code()*B8 | B7 | B4 | src1.code());
1630}
1631
1632
1633void Assembler::umlal(Register dstL,
1634 Register dstH,
1635 Register src1,
1636 Register src2,
1637 SBit s,
1638 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001639 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1640 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001641 emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1642 src2.code()*B8 | B7 | B4 | src1.code());
1643}
1644
1645
1646void Assembler::umull(Register dstL,
1647 Register dstH,
1648 Register src1,
1649 Register src2,
1650 SBit s,
1651 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001652 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1653 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001654 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1655 src2.code()*B8 | B7 | B4 | src1.code());
1656}
1657
1658
Andrei Popescu31002712010-02-23 13:46:05 +00001659// Miscellaneous arithmetic instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001660void Assembler::clz(Register dst, Register src, Condition cond) {
1661 // v5 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001662 DCHECK(!dst.is(pc) && !src.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001663 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
Steve Block1e0659c2011-05-24 12:43:12 +01001664 15*B8 | CLZ | src.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001665}
1666
1667
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001668// Saturating instructions.
1669
1670// Unsigned saturate.
1671void Assembler::usat(Register dst,
1672 int satpos,
1673 const Operand& src,
1674 Condition cond) {
1675 // v6 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001676 DCHECK(CpuFeatures::IsSupported(ARMv7));
1677 DCHECK(!dst.is(pc) && !src.rm_.is(pc));
1678 DCHECK((satpos >= 0) && (satpos <= 31));
1679 DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1680 DCHECK(src.rs_.is(no_reg));
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001681
1682 int sh = 0;
1683 if (src.shift_op_ == ASR) {
1684 sh = 1;
1685 }
1686
1687 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1688 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1689}
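
// Illustrative use (editor's sketch, not from the original source): satpos is
// the number of result bits, so #8 clamps to the unsigned byte range:
//
//   usat(r0, 8, Operand(r1));           // r0 = clamp(r1, 0, 255)
//   usat(r0, 8, Operand(r1, ASR, 2));   // r0 = clamp(r1 >> 2, 0, 255)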
1690
1691
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001692// Bitfield manipulation instructions.
1693
1694// Unsigned bit field extract.
1695// Extracts #width adjacent bits from position #lsb in a register, and
1696// writes them to the low bits of a destination register.
1697// ubfx dst, src, #lsb, #width
1698void Assembler::ubfx(Register dst,
1699 Register src,
1700 int lsb,
1701 int width,
1702 Condition cond) {
1703 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001704 DCHECK(CpuFeatures::IsSupported(ARMv7));
1705 DCHECK(!dst.is(pc) && !src.is(pc));
1706 DCHECK((lsb >= 0) && (lsb <= 31));
1707 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001708 emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1709 lsb*B7 | B6 | B4 | src.code());
1710}
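
// Worked example (illustrative, not from the original source):
//
//   ubfx(r0, r1, 4, 8);   // r0 = (r1 >> 4) & 0xff, i.e. bits 11:4 of r1
//
// with the remaining bits of r0 cleared to zero.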
1711
1712
1713// Signed bit field extract.
1714// Extracts #width adjacent bits from position #lsb in a register, and
1715// writes them to the low bits of a destination register. The extracted
1716// value is sign extended to fill the destination register.
1717// sbfx dst, src, #lsb, #width
1718void Assembler::sbfx(Register dst,
1719 Register src,
1720 int lsb,
1721 int width,
1722 Condition cond) {
1723 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001724 DCHECK(CpuFeatures::IsSupported(ARMv7));
1725 DCHECK(!dst.is(pc) && !src.is(pc));
1726 DCHECK((lsb >= 0) && (lsb <= 31));
1727 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001728 emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1729 lsb*B7 | B6 | B4 | src.code());
1730}
1731
1732
1733// Bit field clear.
1734// Sets #width adjacent bits at position #lsb in the destination register
1735// to zero, preserving the value of the other bits.
1736// bfc dst, #lsb, #width
1737void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1738 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001739 DCHECK(CpuFeatures::IsSupported(ARMv7));
1740 DCHECK(!dst.is(pc));
1741 DCHECK((lsb >= 0) && (lsb <= 31));
1742 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001743 int msb = lsb + width - 1;
1744 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1745}
1746
1747
1748// Bit field insert.
1749// Inserts #width adjacent bits from the low bits of the source register
1750// into position #lsb of the destination register.
1751// bfi dst, src, #lsb, #width
1752void Assembler::bfi(Register dst,
1753 Register src,
1754 int lsb,
1755 int width,
1756 Condition cond) {
1757 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001758 DCHECK(CpuFeatures::IsSupported(ARMv7));
1759 DCHECK(!dst.is(pc) && !src.is(pc));
1760 DCHECK((lsb >= 0) && (lsb <= 31));
1761 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001762 int msb = lsb + width - 1;
1763 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1764 src.code());
1765}
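
// Illustrative pairing (editor's sketch, not from the original source): to
// replace bits 15:8 of r0 with the low byte of r1 one could write
//
//   bfi(r0, r1, 8, 8);   // r0[15:8] = r1[7:0], all other bits of r0 kept
//
// whereas bfc(r0, 8, 8) would simply clear that field.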
1766
1767
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001768void Assembler::pkhbt(Register dst,
1769 Register src1,
1770 const Operand& src2,
1771 Condition cond ) {
1772 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1773 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1774 // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
1775 DCHECK(!dst.is(pc));
1776 DCHECK(!src1.is(pc));
1777 DCHECK(!src2.rm().is(pc));
1778 DCHECK(!src2.rm().is(no_reg));
1779 DCHECK(src2.rs().is(no_reg));
1780 DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1781 DCHECK(src2.shift_op() == LSL);
1782 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1783 src2.shift_imm_*B7 | B4 | src2.rm().code());
1784}
1785
1786
1787void Assembler::pkhtb(Register dst,
1788 Register src1,
1789 const Operand& src2,
1790 Condition cond) {
1791 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1792 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1793 // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
1794 DCHECK(!dst.is(pc));
1795 DCHECK(!src1.is(pc));
1796 DCHECK(!src2.rm().is(pc));
1797 DCHECK(!src2.rm().is(no_reg));
1798 DCHECK(src2.rs().is(no_reg));
1799 DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1800 DCHECK(src2.shift_op() == ASR);
1801 int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1802 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1803 asr*B7 | B6 | B4 | src2.rm().code());
1804}
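
// Illustrative sketch (not from the original source): packing two halfwords,
// with r1 supplying the bottom half and r2 the top half of r0:
//
//   pkhbt(r0, r1, Operand(r2, LSL, 16));   // r0 = (r2[15:0] << 16) | r1[15:0]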
1805
1806
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001807void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
1808 // Instruction details available in ARM DDI 0406C.b, A8.8.233.
1809 // cond(31-28) | 01101010(27-20) | 1111(19-16) |
1810 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1811 DCHECK(!dst.is(pc));
1812 DCHECK(!src.is(pc));
1813 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1814 emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
1815 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1816}
1817
1818
1819void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
1820 Condition cond) {
1821 // Instruction details available in ARM DDI 0406C.b, A8.8.233.
1822 // cond(31-28) | 01101010(27-20) | Rn(19-16) |
1823 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1824 DCHECK(!dst.is(pc));
1825 DCHECK(!src1.is(pc));
1826 DCHECK(!src2.is(pc));
1827 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1828 emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
1829 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1830}
1831
1832
1833void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
1834 // Instruction details available in ARM DDI 0406C.b, A8.8.235.
1835 // cond(31-28) | 01101011(27-20) | 1111(19-16) |
1836 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1837 DCHECK(!dst.is(pc));
1838 DCHECK(!src.is(pc));
1839 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1840 emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
1841 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1842}
1843
1844
1845void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
1846 Condition cond) {
1847 // Instruction details available in ARM DDI 0406C.b, A8.8.235.
1848 // cond(31-28) | 01101011(27-20) | Rn(19-16) |
1849 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1850 DCHECK(!dst.is(pc));
1851 DCHECK(!src1.is(pc));
1852 DCHECK(!src2.is(pc));
1853 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1854 emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
1855 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1856}
1857
1858
1859void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001860 // Instruction details available in ARM DDI 0406C.b, A8.8.274.
1861 // cond(31-28) | 01101110(27-20) | 1111(19-16) |
1862 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1863 DCHECK(!dst.is(pc));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001864 DCHECK(!src.is(pc));
1865 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1866 emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
1867 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001868}
1869
1870
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001871void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001872 Condition cond) {
1873 // Instruction details available in ARM DDI 0406C.b, A8.8.271.
1874 // cond(31-28) | 01101110(27-20) | Rn(19-16) |
1875 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1876 DCHECK(!dst.is(pc));
1877 DCHECK(!src1.is(pc));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001878 DCHECK(!src2.is(pc));
1879 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1880 emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
1881 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001882}
1883
1884
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001885void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001886 // Instruction details available in ARM DDI 0406C.b, A8.8.275.
1887 // cond(31-28) | 01101100(27-20) | 1111(19-16) |
1888 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1889 DCHECK(!dst.is(pc));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001890 DCHECK(!src.is(pc));
1891 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1892 emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
1893 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1894}
1895
1896
1897void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
1898 // Instruction details available in ARM DDI 0406C.b, A8.8.276.
1899 // cond(31-28) | 01101111(27-20) | 1111(19-16) |
1900 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1901 DCHECK(!dst.is(pc));
1902 DCHECK(!src.is(pc));
1903 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1904 emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
1905 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1906}
1907
1908
1909void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
1910 Condition cond) {
1911 // Instruction details available in ARM DDI 0406C.b, A8.8.273.
1912 // cond(31-28) | 01101111(27-20) | Rn(19-16) |
1913 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1914 DCHECK(!dst.is(pc));
1915 DCHECK(!src1.is(pc));
1916 DCHECK(!src2.is(pc));
1917 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1918 emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
1919 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001920}
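
// Illustrative note (editor's sketch, not from the original source): the
// rotate argument selects which byte or halfword of the source is extended:
//
//   uxtb(r0, r1, 0);    // r0 = r1 & 0xff
//   uxtb(r0, r1, 8);    // r0 = (r1 >> 8) & 0xff
//   sxth(r0, r1, 16);   // r0 = sign-extended r1[31:16]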
1921
1922
Andrei Popescu31002712010-02-23 13:46:05 +00001923// Status register access instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001924void Assembler::mrs(Register dst, SRegister s, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001925 DCHECK(!dst.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001926 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1927}
1928
1929
1930void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1931 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001932 DCHECK(fields >= B16 && fields < B20); // at least one field set
Steve Blocka7e24c12009-10-30 11:49:00 +00001933 Instr instr;
1934 if (!src.rm_.is_valid()) {
Andrei Popescu31002712010-02-23 13:46:05 +00001935 // Immediate.
Steve Blocka7e24c12009-10-30 11:49:00 +00001936 uint32_t rotate_imm;
1937 uint32_t immed_8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001938 if (src.must_output_reloc_info(this) ||
Steve Blocka7e24c12009-10-30 11:49:00 +00001939 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
Andrei Popescu31002712010-02-23 13:46:05 +00001940 // Immediate operand cannot be encoded, load it first into register ip.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001941 move_32_bit_immediate(ip, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001942 msr(fields, Operand(ip), cond);
1943 return;
1944 }
1945 instr = I | rotate_imm*B8 | immed_8;
1946 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001947 DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
Steve Blocka7e24c12009-10-30 11:49:00 +00001948 instr = src.rm_.code();
1949 }
1950 emit(cond | instr | B24 | B21 | fields | 15*B12);
1951}
1952
1953
Andrei Popescu31002712010-02-23 13:46:05 +00001954// Load/Store instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001955void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1956 if (dst.is(pc)) {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001957 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00001958 }
1959 addrmod2(cond | B26 | L, dst, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001960}
1961
1962
1963void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
1964 addrmod2(cond | B26, src, dst);
Steve Blocka7e24c12009-10-30 11:49:00 +00001965}
1966
1967
1968void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
1969 addrmod2(cond | B26 | B | L, dst, src);
1970}
1971
1972
1973void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
1974 addrmod2(cond | B26 | B, src, dst);
1975}
1976
1977
1978void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
1979 addrmod3(cond | L | B7 | H | B4, dst, src);
1980}
1981
1982
1983void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
1984 addrmod3(cond | B7 | H | B4, src, dst);
1985}
1986
1987
1988void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
1989 addrmod3(cond | L | B7 | S6 | B4, dst, src);
1990}
1991
1992
1993void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
1994 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
1995}
1996
1997
Leon Clarkef7060e22010-06-03 12:02:55 +01001998void Assembler::ldrd(Register dst1, Register dst2,
1999 const MemOperand& src, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002000 DCHECK(IsEnabled(ARMv7));
2001 DCHECK(src.rm().is(no_reg));
2002 DCHECK(!dst1.is(lr)); // r14.
2003 DCHECK_EQ(0, dst1.code() % 2);
2004 DCHECK_EQ(dst1.code() + 1, dst2.code());
Leon Clarkef7060e22010-06-03 12:02:55 +01002005 addrmod3(cond | B7 | B6 | B4, dst1, src);
Kristian Monsen25f61362010-05-21 11:50:48 +01002006}
2007
2008
Leon Clarkef7060e22010-06-03 12:02:55 +01002009void Assembler::strd(Register src1, Register src2,
2010 const MemOperand& dst, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002011 DCHECK(dst.rm().is(no_reg));
2012 DCHECK(!src1.is(lr)); // r14.
2013 DCHECK_EQ(0, src1.code() % 2);
2014 DCHECK_EQ(src1.code() + 1, src2.code());
2015 DCHECK(IsEnabled(ARMv7));
Leon Clarkef7060e22010-06-03 12:02:55 +01002016 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
Kristian Monsen25f61362010-05-21 11:50:48 +01002017}
2018
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002019
2020// Preload instructions.
2021void Assembler::pld(const MemOperand& address) {
2022 // Instruction details available in ARM DDI 0406C.b, A8.8.128.
2023 // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
2024 // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
2025 DCHECK(address.rm().is(no_reg));
2026 DCHECK(address.am() == Offset);
2027 int U = B23;
2028 int offset = address.offset();
2029 if (offset < 0) {
2030 offset = -offset;
2031 U = 0;
2032 }
2033 DCHECK(offset < 4096);
2034 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
2035 0xf*B12 | offset);
2036}
2037
2038
Andrei Popescu31002712010-02-23 13:46:05 +00002039// Load/Store multiple instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00002040void Assembler::ldm(BlockAddrMode am,
2041 Register base,
2042 RegList dst,
2043 Condition cond) {
Andrei Popescu31002712010-02-23 13:46:05 +00002044 // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002045 DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002046
2047 addrmod4(cond | B27 | am | L, base, dst);
2048
Andrei Popescu31002712010-02-23 13:46:05 +00002049 // Emit the constant pool after a function return implemented by ldm ..{..pc}.
Steve Blocka7e24c12009-10-30 11:49:00 +00002050 if (cond == al && (dst & pc.bit()) != 0) {
2051 // There is a slight chance that the ldm instruction was actually a call,
2052 // in which case it would be wrong to return into the constant pool; we
2053 // recognize this case by checking if the emission of the pool was blocked
2054 // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
2055 // the case, we emit a jump over the pool.
2056 CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
2057 }
2058}
2059
2060
2061void Assembler::stm(BlockAddrMode am,
2062 Register base,
2063 RegList src,
2064 Condition cond) {
2065 addrmod4(cond | B27 | am, base, src);
2066}
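
// Illustrative sketch (not from the original source): a typical callee-saved
// push/pop pair built on these block transfers:
//
//   stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());   // push {r4, r5, lr}
//   ...
//   ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());   // pop {r4, r5, pc}
//
// db_w and ia_w write the updated base back to sp, mirroring push/pop.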
2067
2068
Andrei Popescu31002712010-02-23 13:46:05 +00002069// Exception-generating instructions and debugging support.
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002070// Stops with a non-negative code less than kNumOfWatchedStops support
2071// enabling/disabling and a counter feature. See simulator-arm.h.
2072void Assembler::stop(const char* msg, Condition cond, int32_t code) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002073#ifndef __arm__
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002074 DCHECK(code >= kDefaultStopCode);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002075 {
2076 // The Simulator will handle the stop instruction and get the message
2077 // address. It expects to find the address just after the svc instruction.
2078 BlockConstPoolScope block_const_pool(this);
2079 if (code >= 0) {
2080 svc(kStopCode + code, cond);
2081 } else {
2082 svc(kStopCode + kMaxStopCode, cond);
2083 }
2084 emit(reinterpret_cast<Instr>(msg));
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002085 }
Andrei Popescu402d9372010-02-26 13:31:12 +00002086#else // def __arm__
Steve Block1e0659c2011-05-24 12:43:12 +01002087 if (cond != al) {
2088 Label skip;
2089 b(&skip, NegateCondition(cond));
2090 bkpt(0);
2091 bind(&skip);
2092 } else {
2093 bkpt(0);
2094 }
Andrei Popescu402d9372010-02-26 13:31:12 +00002095#endif // def __arm__
Steve Blocka7e24c12009-10-30 11:49:00 +00002096}
2097
2098
2099void Assembler::bkpt(uint32_t imm16) { // v5 and above
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002100 DCHECK(is_uint16(imm16));
Steve Block1e0659c2011-05-24 12:43:12 +01002101 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
Steve Blocka7e24c12009-10-30 11:49:00 +00002102}
2103
2104
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002105void Assembler::svc(uint32_t imm24, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002106 DCHECK(is_uint24(imm24));
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 emit(cond | 15*B24 | imm24);
2108}
2109
2110
Andrei Popescu31002712010-02-23 13:46:05 +00002111// Coprocessor instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00002112void Assembler::cdp(Coprocessor coproc,
2113 int opcode_1,
2114 CRegister crd,
2115 CRegister crn,
2116 CRegister crm,
2117 int opcode_2,
2118 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002119 DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
Steve Blocka7e24c12009-10-30 11:49:00 +00002120 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2121 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
2122}
2123
2124
2125void Assembler::cdp2(Coprocessor coproc,
2126 int opcode_1,
2127 CRegister crd,
2128 CRegister crn,
2129 CRegister crm,
2130 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002131 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002132}
2133
2134
2135void Assembler::mcr(Coprocessor coproc,
2136 int opcode_1,
2137 Register rd,
2138 CRegister crn,
2139 CRegister crm,
2140 int opcode_2,
2141 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002142 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
Steve Blocka7e24c12009-10-30 11:49:00 +00002143 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2144 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2145}
2146
2147
2148void Assembler::mcr2(Coprocessor coproc,
2149 int opcode_1,
2150 Register rd,
2151 CRegister crn,
2152 CRegister crm,
2153 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002154 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002155}
2156
2157
2158void Assembler::mrc(Coprocessor coproc,
2159 int opcode_1,
2160 Register rd,
2161 CRegister crn,
2162 CRegister crm,
2163 int opcode_2,
2164 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002165 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
Steve Blocka7e24c12009-10-30 11:49:00 +00002166 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2167 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2168}
2169
2170
2171void Assembler::mrc2(Coprocessor coproc,
2172 int opcode_1,
2173 Register rd,
2174 CRegister crn,
2175 CRegister crm,
2176 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002177 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002178}
2179
2180
2181void Assembler::ldc(Coprocessor coproc,
2182 CRegister crd,
2183 const MemOperand& src,
2184 LFlag l,
2185 Condition cond) {
2186 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2187}
2188
2189
2190void Assembler::ldc(Coprocessor coproc,
2191 CRegister crd,
2192 Register rn,
2193 int option,
2194 LFlag l,
2195 Condition cond) {
Andrei Popescu31002712010-02-23 13:46:05 +00002196 // Unindexed addressing.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002197 DCHECK(is_uint8(option));
Steve Blocka7e24c12009-10-30 11:49:00 +00002198 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2199 coproc*B8 | (option & 255));
2200}
2201
2202
2203void Assembler::ldc2(Coprocessor coproc,
2204 CRegister crd,
2205 const MemOperand& src,
2206 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002207 ldc(coproc, crd, src, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002208}
2209
2210
2211void Assembler::ldc2(Coprocessor coproc,
2212 CRegister crd,
2213 Register rn,
2214 int option,
2215 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002216 ldc(coproc, crd, rn, option, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002217}
2218
2219
Steve Blockd0582a62009-12-15 09:54:21 +00002220// Support for VFP.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002221
Leon Clarked91b9f72010-01-27 17:25:45 +00002222void Assembler::vldr(const DwVfpRegister dst,
2223 const Register base,
2224 int offset,
2225 const Condition cond) {
2226 // Ddst = MEM(Rbase + offset).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002227 // Instruction details available in ARM DDI 0406C.b, A8-924.
2228 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2229 // Vd(15-12) | 1011(11-8) | offset
Ben Murdochb0fe1622011-05-05 13:52:32 +01002230 int u = 1;
2231 if (offset < 0) {
2232 offset = -offset;
2233 u = 0;
2234 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002235 int vd, d;
2236 dst.split_code(&vd, &d);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002237
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002238 DCHECK(offset >= 0);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002239 if ((offset % 4) == 0 && (offset / 4) < 256) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002240 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002241 0xB*B8 | ((offset / 4) & 255));
2242 } else {
2243 // Larger offsets must be handled by computing the correct address
2244 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002245 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002246 if (u == 1) {
2247 add(ip, base, Operand(offset));
2248 } else {
2249 sub(ip, base, Operand(offset));
2250 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002251 emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002252 }
2253}
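
// Illustrative example (not from the original source): vldr(d0, r1, 4) fits
// the 8-bit word-offset field directly, while vldr(d0, r1, 2048) does not
// (2048 / 4 == 512 >= 256), so the code above first forms r1 + 2048 in ip and
// then loads with a zero offset.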
2254
2255
2256void Assembler::vldr(const DwVfpRegister dst,
2257 const MemOperand& operand,
2258 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002259 DCHECK(operand.am_ == Offset);
2260 if (operand.rm().is_valid()) {
2261 add(ip, operand.rn(),
2262 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2263 vldr(dst, ip, 0, cond);
2264 } else {
2265 vldr(dst, operand.rn(), operand.offset(), cond);
2266 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002267}
2268
2269
Steve Block6ded16b2010-05-10 14:33:55 +01002270void Assembler::vldr(const SwVfpRegister dst,
2271 const Register base,
2272 int offset,
2273 const Condition cond) {
2274 // Sdst = MEM(Rbase + offset).
2275 // Instruction details available in ARM DDI 0406A, A8-628.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002276 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
Steve Block6ded16b2010-05-10 14:33:55 +01002277 // Vdst(15-12) | 1010(11-8) | offset
Ben Murdochb0fe1622011-05-05 13:52:32 +01002278 int u = 1;
2279 if (offset < 0) {
2280 offset = -offset;
2281 u = 0;
2282 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002283 int sd, d;
2284 dst.split_code(&sd, &d);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002285 DCHECK(offset >= 0);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002286
2287 if ((offset % 4) == 0 && (offset / 4) < 256) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01002288 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
Steve Block6ded16b2010-05-10 14:33:55 +01002289 0xA*B8 | ((offset / 4) & 255));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002290 } else {
2291 // Larger offsets must be handled by computing the correct address
2292 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002293 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002294 if (u == 1) {
2295 add(ip, base, Operand(offset));
2296 } else {
2297 sub(ip, base, Operand(offset));
2298 }
2299 emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2300 }
2301}
2302
2303
2304void Assembler::vldr(const SwVfpRegister dst,
2305 const MemOperand& operand,
2306 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002307 DCHECK(operand.am_ == Offset);
2308 if (operand.rm().is_valid()) {
2309 add(ip, operand.rn(),
2310 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2311 vldr(dst, ip, 0, cond);
2312 } else {
2313 vldr(dst, operand.rn(), operand.offset(), cond);
2314 }
Steve Block6ded16b2010-05-10 14:33:55 +01002315}
2316
2317
Leon Clarked91b9f72010-01-27 17:25:45 +00002318void Assembler::vstr(const DwVfpRegister src,
2319 const Register base,
2320 int offset,
2321 const Condition cond) {
2322 // MEM(Rbase + offset) = Dsrc.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002323 // Instruction details available in ARM DDI 0406C.b, A8-1082.
2324 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2325 // Vd(15-12) | 1011(11-8) | (offset/4)
Ben Murdochb0fe1622011-05-05 13:52:32 +01002326 int u = 1;
2327 if (offset < 0) {
2328 offset = -offset;
2329 u = 0;
2330 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002331 DCHECK(offset >= 0);
2332 int vd, d;
2333 src.split_code(&vd, &d);
2334
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002335 if ((offset % 4) == 0 && (offset / 4) < 256) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002336 emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2337 ((offset / 4) & 255));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002338 } else {
2339 // Larger offsets must be handled by computing the correct address
2340 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002341 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002342 if (u == 1) {
2343 add(ip, base, Operand(offset));
2344 } else {
2345 sub(ip, base, Operand(offset));
2346 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002347 emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002348 }
2349}
2350
2351
2352void Assembler::vstr(const DwVfpRegister src,
2353 const MemOperand& operand,
2354 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002355 DCHECK(operand.am_ == Offset);
2356 if (operand.rm().is_valid()) {
2357 add(ip, operand.rn(),
2358 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2359 vstr(src, ip, 0, cond);
2360 } else {
2361 vstr(src, operand.rn(), operand.offset(), cond);
2362 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002363}
2364
2365
Iain Merrick75681382010-08-19 15:07:18 +01002366void Assembler::vstr(const SwVfpRegister src,
2367 const Register base,
2368 int offset,
2369 const Condition cond) {
2370 // MEM(Rbase + offset) = SSrc.
2371 // Instruction details available in ARM DDI 0406A, A8-786.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002372 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
Iain Merrick75681382010-08-19 15:07:18 +01002373 // Vsrc(15-12) | 1010(11-8) | (offset/4)
Ben Murdochb0fe1622011-05-05 13:52:32 +01002374 int u = 1;
2375 if (offset < 0) {
2376 offset = -offset;
2377 u = 0;
2378 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002379 int sd, d;
2380 src.split_code(&sd, &d);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002381 DCHECK(offset >= 0);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002382 if ((offset % 4) == 0 && (offset / 4) < 256) {
2383 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2384 0xA*B8 | ((offset / 4) & 255));
2385 } else {
2386 // Larger offsets must be handled by computing the correct address
2387 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002388 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002389 if (u == 1) {
2390 add(ip, base, Operand(offset));
2391 } else {
2392 sub(ip, base, Operand(offset));
2393 }
2394 emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2395 }
2396}
2397
2398
2399void Assembler::vstr(const SwVfpRegister src,
2400 const MemOperand& operand,
2401 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002402 DCHECK(operand.am_ == Offset);
2403 if (operand.rm().is_valid()) {
2404 add(ip, operand.rn(),
2405 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2406 vstr(src, ip, 0, cond);
2407 } else {
2408 vstr(src, operand.rn(), operand.offset(), cond);
2409 }
Iain Merrick75681382010-08-19 15:07:18 +01002410}
2411
2412
Ben Murdoch8b112d22011-06-08 16:22:53 +01002413void Assembler::vldm(BlockAddrMode am,
2414 Register base,
2415 DwVfpRegister first,
2416 DwVfpRegister last,
2417 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002418 // Instruction details available in ARM DDI 0406C.b, A8-922.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002419 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002420 // first(15-12) | 1011(11-8) | (count * 2)
2421 DCHECK_LE(first.code(), last.code());
2422 DCHECK(am == ia || am == ia_w || am == db_w);
2423 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002424
2425 int sd, d;
2426 first.split_code(&sd, &d);
2427 int count = last.code() - first.code() + 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002428 DCHECK(count <= 16);
Ben Murdoch8b112d22011-06-08 16:22:53 +01002429 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2430 0xB*B8 | count*2);
2431}
2432
2433
2434void Assembler::vstm(BlockAddrMode am,
2435 Register base,
2436 DwVfpRegister first,
2437 DwVfpRegister last,
2438 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002439 // Instruction details available in ARM DDI 0406C.b, A8-1080.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002440 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2441 // first(15-12) | 1011(11-8) | (count * 2)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002442 DCHECK_LE(first.code(), last.code());
2443 DCHECK(am == ia || am == ia_w || am == db_w);
2444 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002445
2446 int sd, d;
2447 first.split_code(&sd, &d);
2448 int count = last.code() - first.code() + 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002449 DCHECK(count <= 16);
Ben Murdoch8b112d22011-06-08 16:22:53 +01002450 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2451 0xB*B8 | count*2);
2452}
2453
2454void Assembler::vldm(BlockAddrMode am,
2455 Register base,
2456 SwVfpRegister first,
2457 SwVfpRegister last,
2458 Condition cond) {
2459 // Instruction details available in ARM DDI 0406A, A8-626.
2460 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2461 // first(15-12) | 1010(11-8) | (count)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002462 DCHECK_LE(first.code(), last.code());
2463 DCHECK(am == ia || am == ia_w || am == db_w);
2464 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002465
2466 int sd, d;
2467 first.split_code(&sd, &d);
2468 int count = last.code() - first.code() + 1;
2469 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2470 0xA*B8 | count);
2471}
2472
2473
2474void Assembler::vstm(BlockAddrMode am,
2475 Register base,
2476 SwVfpRegister first,
2477 SwVfpRegister last,
2478 Condition cond) {
2479 // Instruction details available in ARM DDI 0406A, A8-784.
2480 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2481 // first(15-12) | 1010(11-8) | (count)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002482 DCHECK_LE(first.code(), last.code());
2483 DCHECK(am == ia || am == ia_w || am == db_w);
2484 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002485
2486 int sd, d;
2487 first.split_code(&sd, &d);
2488 int count = last.code() - first.code() + 1;
2489 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2490 0xA*B8 | count);
2491}
2492
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002493
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002494void Assembler::vmov(const SwVfpRegister dst, float imm) {
2495 mov(ip, Operand(bit_cast<int32_t>(imm)));
2496 vmov(dst, ip);
2497}
2498
2499
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002500static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2501 uint64_t i;
2502 memcpy(&i, &d, 8);
2503
2504 *lo = i & 0xffffffff;
2505 *hi = i >> 32;
2506}
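
// Worked example (illustrative): for d == 1.0 the IEEE-754 bit pattern is
// 0x3FF0000000000000, so DoubleAsTwoUInt32 produces lo == 0x00000000 and
// hi == 0x3FF00000.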
2507
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002508
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002509// Only works for little endian floating point formats.
2510// We don't support VFP on the mixed endian floating point platform.
2511static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002512 DCHECK(CpuFeatures::IsSupported(VFP3));
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002513
2514 // VMOV can accept an immediate of the form:
2515 //
2516 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2517 //
2518 // The immediate is encoded using an 8-bit quantity, comprised of two
2519 // 4-bit fields. For an 8-bit immediate of the form:
2520 //
2521 // [abcdefgh]
2522 //
2523 // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2524 // created of the form:
2525 //
2526 // [aBbbbbbb,bbcdefgh,00000000,00000000,
2527 // 00000000,00000000,00000000,00000000]
2528 //
2529 // where B = ~b.
2530 //
2531
2532 uint32_t lo, hi;
2533 DoubleAsTwoUInt32(d, &lo, &hi);
2534
2535 // The most obvious constraint is the long block of zeroes.
2536 if ((lo != 0) || ((hi & 0xffff) != 0)) {
2537 return false;
2538 }
2539
2540 // Bits 62:55 must be all clear or all set.
2541 if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2542 return false;
2543 }
2544
2545 // Bit 63 must be NOT bit 62.
2546 if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2547 return false;
2548 }
2549
2550 // Create the encoded immediate in the form:
2551 // [00000000,0000abcd,00000000,0000efgh]
2552 *encoding = (hi >> 16) & 0xf; // Low nybble.
2553 *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2554 *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
2555
2556 return true;
2557}
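
// Worked example (illustrative, not from the original source): d == 1.0 gives
// hi == 0x3FF00000 and lo == 0, which passes all three checks above. The
// resulting encoding is 0x70000, i.e. imm4H == 0x7 and imm4L == 0x0, matching
// the architectural VFP immediate abcdefgh == 0b01110000 for 1.0.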
2558
2559
2560void Assembler::vmov(const DwVfpRegister dst,
2561 double imm,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002562 const Register scratch) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002563 uint32_t enc;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002564 if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002565 // The double can be encoded in the instruction.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002566 //
2567 // Dd = immediate
2568 // Instruction details available in ARM DDI 0406C.b, A8-936.
2569 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2570 // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2571 int vd, d;
2572 dst.split_code(&vd, &d);
2573 emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002574 } else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002575 // TODO(jfb) Temporarily turned off until we have constant blinding or
2576 // some equivalent mitigation: an attacker can otherwise control
2577 // generated data which also happens to be executable, a Very Bad
2578 // Thing indeed.
2579 // Blinding gets tricky because we don't have xor, we probably
2580 // need to add/subtract without losing precision, which requires a
2581 // cookie value that Lithium is probably better positioned to
2582 // choose.
2583 // We could also add a few peepholes here like detecting 0.0 and
2584 // -0.0 and doing a vmov from the sequestered d14, forcing denorms
2585 // to zero (we set flush-to-zero), and normalizing NaN values.
2586 // We could also detect redundant values.
2587 // The code could also randomize the order of values, though
2588 // that's tricky because vldr has a limited reach. Furthermore
2589 // it breaks load locality.
2590 RelocInfo rinfo(pc_, imm);
2591 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
2592 if (section == ConstantPoolArray::EXTENDED_SECTION) {
2593 DCHECK(FLAG_enable_ool_constant_pool);
2594 // Emit instructions to load constant pool offset.
2595 movw(ip, 0);
2596 movt(ip, 0);
2597 // Load from constant pool at offset.
2598 vldr(dst, MemOperand(pp, ip));
2599 } else {
2600 DCHECK(section == ConstantPoolArray::SMALL_SECTION);
2601 vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
2602 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002603 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002604 // Synthesise the double from ARM immediates.
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002605 uint32_t lo, hi;
2606 DoubleAsTwoUInt32(imm, &lo, &hi);
2607
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002608 if (lo == hi) {
2609 // Move the low and high parts of the double to a D register in one
2610 // instruction.
2611 mov(ip, Operand(lo));
2612 vmov(dst, ip, ip);
2613 } else if (scratch.is(no_reg)) {
2614 mov(ip, Operand(lo));
2615 vmov(dst, VmovIndexLo, ip);
2616 if ((lo & 0xffff) == (hi & 0xffff)) {
2617 movt(ip, hi >> 16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002618 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002619 mov(ip, Operand(hi));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002620 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002621 vmov(dst, VmovIndexHi, ip);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002622 } else {
2623 // Move the low and high parts of the double to a D register in one
2624 // instruction.
2625 mov(ip, Operand(lo));
2626 mov(scratch, Operand(hi));
2627 vmov(dst, ip, scratch);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002628 }
2629 }
2630}
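
// Usage note (editor's sketch, not from the original source): vmov(d0, 1.0)
// can take the single-instruction VFP immediate path when VFP3 is available,
// whereas a value outside the +/- m * 2^(-n) range (e.g. 1.0e-10) falls back
// to either the constant-pool vldr or the synthesized mov/vmov sequence above,
// depending on FLAG_enable_vldr_imm.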
2631
2632
2633void Assembler::vmov(const SwVfpRegister dst,
2634 const SwVfpRegister src,
2635 const Condition cond) {
2636 // Sd = Sm
2637 // Instruction details available in ARM DDI 0406B, A8-642.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002638 int sd, d, sm, m;
2639 dst.split_code(&sd, &d);
2640 src.split_code(&sm, &m);
2641 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002642}
2643
2644
Leon Clarkee46be812010-01-19 14:06:41 +00002645void Assembler::vmov(const DwVfpRegister dst,
Steve Block8defd9f2010-07-08 12:39:36 +01002646 const DwVfpRegister src,
2647 const Condition cond) {
2648 // Dd = Dm
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002649 // Instruction details available in ARM DDI 0406C.b, A8-938.
2650 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2651 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2652 int vd, d;
2653 dst.split_code(&vd, &d);
2654 int vm, m;
2655 src.split_code(&vm, &m);
2656 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
2657 vm);
2658}
2659
2660
2661void Assembler::vmov(const DwVfpRegister dst,
2662 const VmovIndex index,
2663 const Register src,
2664 const Condition cond) {
2665 // Dd[index] = Rt
2666 // Instruction details available in ARM DDI 0406C.b, A8-940.
2667 // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2668 // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2669 DCHECK(index.index == 0 || index.index == 1);
2670 int vd, d;
2671 dst.split_code(&vd, &d);
2672 emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
2673 d*B7 | B4);
2674}
2675
2676
2677void Assembler::vmov(const Register dst,
2678 const VmovIndex index,
2679 const DwVfpRegister src,
2680 const Condition cond) {
2681 // Rt = Dd[index]
2682 // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2683 // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2684 // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2685 DCHECK(index.index == 0 || index.index == 1);
2686 int vn, n;
2687 src.split_code(&vn, &n);
2688 emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2689 0xB*B8 | n*B7 | B4);
Steve Block8defd9f2010-07-08 12:39:36 +01002690}
2691
2692
2693void Assembler::vmov(const DwVfpRegister dst,
Leon Clarkee46be812010-01-19 14:06:41 +00002694 const Register src1,
2695 const Register src2,
2696 const Condition cond) {
Steve Blockd0582a62009-12-15 09:54:21 +00002697 // Dm = <Rt,Rt2>.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002698 // Instruction details available in ARM DDI 0406C.b, A8-948.
Steve Blockd0582a62009-12-15 09:54:21 +00002699 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2700 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002701 DCHECK(!src1.is(pc) && !src2.is(pc));
2702 int vm, m;
2703 dst.split_code(&vm, &m);
Steve Blockd0582a62009-12-15 09:54:21 +00002704 emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002705 src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00002706}
2707
2708
Leon Clarkee46be812010-01-19 14:06:41 +00002709void Assembler::vmov(const Register dst1,
2710 const Register dst2,
2711 const DwVfpRegister src,
2712 const Condition cond) {
Steve Blockd0582a62009-12-15 09:54:21 +00002713 // <Rt,Rt2> = Dm.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002714 // Instruction details available in ARM DDI 0406C.b, A8-948.
Steve Blockd0582a62009-12-15 09:54:21 +00002715 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2716 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002717 DCHECK(!dst1.is(pc) && !dst2.is(pc));
2718 int vm, m;
2719 src.split_code(&vm, &m);
Steve Blockd0582a62009-12-15 09:54:21 +00002720 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002721 dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00002722}
2723
2724
Leon Clarkee46be812010-01-19 14:06:41 +00002725void Assembler::vmov(const SwVfpRegister dst,
Steve Blockd0582a62009-12-15 09:54:21 +00002726 const Register src,
Steve Blockd0582a62009-12-15 09:54:21 +00002727 const Condition cond) {
2728 // Sn = Rt.
2729 // Instruction details available in ARM DDI 0406A, A8-642.
2730 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2731 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002732 DCHECK(!src.is(pc));
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002733 int sn, n;
2734 dst.split_code(&sn, &n);
2735 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
Steve Blockd0582a62009-12-15 09:54:21 +00002736}
2737
2738
Leon Clarkee46be812010-01-19 14:06:41 +00002739void Assembler::vmov(const Register dst,
2740 const SwVfpRegister src,
Steve Blockd0582a62009-12-15 09:54:21 +00002741 const Condition cond) {
2742 // Rt = Sn.
2743 // Instruction details available in ARM DDI 0406A, A8-642.
2744 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2745 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002746 DCHECK(!dst.is(pc));
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002747 int sn, n;
2748 src.split_code(&sn, &n);
2749 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
Steve Blockd0582a62009-12-15 09:54:21 +00002750}
2751
2752
Steve Block6ded16b2010-05-10 14:33:55 +01002753// Type of data to read from or write to VFP register.
2754// Used as specifier in generic vcvt instruction.
2755enum VFPType { S32, U32, F32, F64 };
2756
2757
2758static bool IsSignedVFPType(VFPType type) {
2759 switch (type) {
2760 case S32:
2761 return true;
2762 case U32:
2763 return false;
2764 default:
2765 UNREACHABLE();
2766 return false;
2767 }
Steve Blockd0582a62009-12-15 09:54:21 +00002768}
2769
2770
Steve Block6ded16b2010-05-10 14:33:55 +01002771static bool IsIntegerVFPType(VFPType type) {
2772 switch (type) {
2773 case S32:
2774 case U32:
2775 return true;
2776 case F32:
2777 case F64:
2778 return false;
2779 default:
2780 UNREACHABLE();
2781 return false;
2782 }
2783}
2784
2785
2786static bool IsDoubleVFPType(VFPType type) {
2787 switch (type) {
2788 case F32:
2789 return false;
2790 case F64:
2791 return true;
2792 default:
2793 UNREACHABLE();
2794 return false;
2795 }
2796}
2797
2798
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002799// Split five bit reg_code based on size of reg_type.
2800// 32-bit register codes are Vm:M
2801// 64-bit register codes are M:Vm
2802// where Vm is four bits, and M is a single bit.
2803static void SplitRegCode(VFPType reg_type,
Steve Block6ded16b2010-05-10 14:33:55 +01002804 int reg_code,
2805 int* vm,
2806 int* m) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002807 DCHECK((reg_code >= 0) && (reg_code <= 31));
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002808 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2809 // 32 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002810 *m = reg_code & 0x1;
2811 *vm = reg_code >> 1;
2812 } else {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002813 // 64 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002814 *m = (reg_code & 0x10) >> 4;
2815 *vm = reg_code & 0x0F;
2816 }
2817}
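// Example: for a 32-bit (single-precision) type, reg_code 5 (s5) splits into
// vm = 2, m = 1, since S registers are encoded as Vm:M. For a 64-bit
// (double-precision) type, reg_code 17 (d17) splits into m = 1, vm = 1,
// since D registers are encoded as M:Vm.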
2818
2819
2820// Encode vcvt.src_type.dst_type instruction.
2821static Instr EncodeVCVT(const VFPType dst_type,
2822 const int dst_code,
2823 const VFPType src_type,
2824 const int src_code,
Steve Block1e0659c2011-05-24 12:43:12 +01002825 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002826 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002827 DCHECK(src_type != dst_type);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002828 int D, Vd, M, Vm;
2829 SplitRegCode(src_type, src_code, &Vm, &M);
2830 SplitRegCode(dst_type, dst_code, &Vd, &D);
2831
Steve Block6ded16b2010-05-10 14:33:55 +01002832 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2833 // Conversion between IEEE floating point and 32-bit integer.
2834 // Instruction details available in ARM DDI 0406B, A8.6.295.
2835 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2836 // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002837 DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
Steve Block6ded16b2010-05-10 14:33:55 +01002838
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002839 int sz, opc2, op;
Steve Block6ded16b2010-05-10 14:33:55 +01002840
2841 if (IsIntegerVFPType(dst_type)) {
2842 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2843 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
Russell Brenner90bac252010-11-18 13:33:46 -08002844 op = mode;
Steve Block6ded16b2010-05-10 14:33:55 +01002845 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002846 DCHECK(IsIntegerVFPType(src_type));
Steve Block6ded16b2010-05-10 14:33:55 +01002847 opc2 = 0x0;
2848 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2849 op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
Steve Block6ded16b2010-05-10 14:33:55 +01002850 }
2851
2852 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2853 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2854 } else {
2855 // Conversion between IEEE double and single precision.
2856 // Instruction details available in ARM DDI 0406B, A8.6.298.
2857 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2858 // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002859 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
Steve Block6ded16b2010-05-10 14:33:55 +01002860 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2861 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2862 }
2863}
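// For instance, encoding vcvt.f64.s32 (dst_type F64, src_type S32) takes the
// integer branch with opc2 = 0x0, sz = 1 (double destination) and op = 1
// (signed source), while vcvt.s32.f64 uses opc2 = 0x5, sz = 1 (double source)
// and op = mode.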
2864
2865
2866void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2867 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002868 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002869 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002870 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002871}
2872
2873
2874void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2875 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002876 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002877 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002878 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002879}
2880
2881
2882void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2883 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002884 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002885 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002886 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002887}
2888
2889
2890void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2891 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002892 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002893 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002894 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002895}
2896
2897
2898void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2899 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002900 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002901 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002902 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002903}
2904
2905
2906void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
2907 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002908 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002909 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002910 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002911}
2912
2913
2914void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
2915 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002916 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002917 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002918 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
Steve Blockd0582a62009-12-15 09:54:21 +00002919}
2920
2921
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002922void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2923 int fraction_bits,
2924 const Condition cond) {
2925 // Instruction details available in ARM DDI 0406C.b, A8-874.
2926 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
2927 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
2928 DCHECK(fraction_bits > 0 && fraction_bits <= 32);
2929 DCHECK(CpuFeatures::IsSupported(VFP3));
2930 int vd, d;
2931 dst.split_code(&vd, &d);
2932 int imm5 = 32 - fraction_bits;
2933 int i = imm5 & 1;
2934 int imm4 = (imm5 >> 1) & 0xf;
2935 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
2936 vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
2937}
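// Example: fraction_bits = 16 gives imm5 = 32 - 16 = 16, which is emitted as
// i = 0 (bit 5) and imm4 = 8 (bits 3-0), i.e. imm5 split as imm4:i.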
2938
2939
Steve Block44f0eee2011-05-26 01:26:41 +01002940void Assembler::vneg(const DwVfpRegister dst,
2941 const DwVfpRegister src,
2942 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002943 // Instruction details available in ARM DDI 0406C.b, A8-968.
2944 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
2945 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2946 int vd, d;
2947 dst.split_code(&vd, &d);
2948 int vm, m;
2949 src.split_code(&vm, &m);
2950
2951 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
2952 m*B5 | vm);
Steve Block44f0eee2011-05-26 01:26:41 +01002953}
2954
2955
Steve Block1e0659c2011-05-24 12:43:12 +01002956void Assembler::vabs(const DwVfpRegister dst,
2957 const DwVfpRegister src,
2958 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002959 // Instruction details available in ARM DDI 0406C.b, A8-524.
2960 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2961 // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2962 int vd, d;
2963 dst.split_code(&vd, &d);
2964 int vm, m;
2965 src.split_code(&vm, &m);
2966 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
2967 m*B5 | vm);
Steve Block1e0659c2011-05-24 12:43:12 +01002968}
2969
2970
Leon Clarkee46be812010-01-19 14:06:41 +00002971void Assembler::vadd(const DwVfpRegister dst,
2972 const DwVfpRegister src1,
2973 const DwVfpRegister src2,
2974 const Condition cond) {
2975 // Dd = vadd(Dn, Dm) double precision floating point addition.
Steve Blockd0582a62009-12-15 09:54:21 +00002976 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002977 // Instruction details available in ARM DDI 0406C.b, A8-830.
2978 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2979 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
2980 int vd, d;
2981 dst.split_code(&vd, &d);
2982 int vn, n;
2983 src1.split_code(&vn, &n);
2984 int vm, m;
2985 src2.split_code(&vm, &m);
2986 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
2987 n*B7 | m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00002988}
2989
2990
Leon Clarkee46be812010-01-19 14:06:41 +00002991void Assembler::vsub(const DwVfpRegister dst,
2992 const DwVfpRegister src1,
2993 const DwVfpRegister src2,
2994 const Condition cond) {
2995 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
Steve Blockd0582a62009-12-15 09:54:21 +00002996 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002997 // Instruction details available in ARM DDI 0406C.b, A8-1086.
2998 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
2999 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3000 int vd, d;
3001 dst.split_code(&vd, &d);
3002 int vn, n;
3003 src1.split_code(&vn, &n);
3004 int vm, m;
3005 src2.split_code(&vm, &m);
3006 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3007 n*B7 | B6 | m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003008}
3009
3010
Leon Clarkee46be812010-01-19 14:06:41 +00003011void Assembler::vmul(const DwVfpRegister dst,
3012 const DwVfpRegister src1,
3013 const DwVfpRegister src2,
3014 const Condition cond) {
3015 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
Steve Blockd0582a62009-12-15 09:54:21 +00003016 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003017 // Instruction details available in ARM DDI 0406C.b, A8-960.
3018 // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
3019 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3020 int vd, d;
3021 dst.split_code(&vd, &d);
3022 int vn, n;
3023 src1.split_code(&vn, &n);
3024 int vm, m;
3025 src2.split_code(&vm, &m);
3026 emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3027 n*B7 | m*B5 | vm);
3028}
3029
3030
3031void Assembler::vmla(const DwVfpRegister dst,
3032 const DwVfpRegister src1,
3033 const DwVfpRegister src2,
3034 const Condition cond) {
3035 // Instruction details available in ARM DDI 0406C.b, A8-932.
3036 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3037 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
3038 int vd, d;
3039 dst.split_code(&vd, &d);
3040 int vn, n;
3041 src1.split_code(&vn, &n);
3042 int vm, m;
3043 src2.split_code(&vm, &m);
3044 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
3045 vm);
3046}
3047
3048
3049void Assembler::vmls(const DwVfpRegister dst,
3050 const DwVfpRegister src1,
3051 const DwVfpRegister src2,
3052 const Condition cond) {
3053 // Instruction details available in ARM DDI 0406C.b, A8-932.
3054 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3055 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
3056 int vd, d;
3057 dst.split_code(&vd, &d);
3058 int vn, n;
3059 src1.split_code(&vn, &n);
3060 int vm, m;
3061 src2.split_code(&vm, &m);
3062 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
3063 m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003064}
3065
3066
Leon Clarkee46be812010-01-19 14:06:41 +00003067void Assembler::vdiv(const DwVfpRegister dst,
3068 const DwVfpRegister src1,
3069 const DwVfpRegister src2,
3070 const Condition cond) {
3071 // Dd = vdiv(Dn, Dm) double precision floating point division.
Steve Blockd0582a62009-12-15 09:54:21 +00003072 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003073 // Instruction details available in ARM DDI 0406C.b, A8-882.
3074 // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
3075 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3076 int vd, d;
3077 dst.split_code(&vd, &d);
3078 int vn, n;
3079 src1.split_code(&vn, &n);
3080 int vm, m;
3081 src2.split_code(&vm, &m);
3082 emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
3083 vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003084}
3085
3086
Leon Clarkee46be812010-01-19 14:06:41 +00003087void Assembler::vcmp(const DwVfpRegister src1,
3088 const DwVfpRegister src2,
Steve Blockd0582a62009-12-15 09:54:21 +00003089 const Condition cond) {
3090 // vcmp(Dd, Dm) double precision floating point comparison.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003091 // Instruction details available in ARM DDI 0406C.b, A8-864.
3092 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
3093 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3094 int vd, d;
3095 src1.split_code(&vd, &d);
3096 int vm, m;
3097 src2.split_code(&vm, &m);
3098 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
3099 m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003100}
3101
3102
Iain Merrick75681382010-08-19 15:07:18 +01003103void Assembler::vcmp(const DwVfpRegister src1,
3104 const double src2,
Iain Merrick75681382010-08-19 15:07:18 +01003105 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003106 // vcmp(Dd, #0.0) double precision floating point comparison.
3107 // Instruction details available in ARM DDI 0406C.b, A8-864.
3108 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
3109 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
3110 DCHECK(src2 == 0.0);
3111 int vd, d;
3112 src1.split_code(&vd, &d);
3113 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
Iain Merrick75681382010-08-19 15:07:18 +01003114}
3115
3116
Russell Brenner90bac252010-11-18 13:33:46 -08003117void Assembler::vmsr(Register dst, Condition cond) {
3118 // Instruction details available in ARM DDI 0406A, A8-652.
3119 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
3120 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
Russell Brenner90bac252010-11-18 13:33:46 -08003121 emit(cond | 0xE*B24 | 0xE*B20 | B16 |
3122 dst.code()*B12 | 0xA*B8 | B4);
3123}
3124
3125
Steve Blockd0582a62009-12-15 09:54:21 +00003126void Assembler::vmrs(Register dst, Condition cond) {
3127 // Instruction details available in ARM DDI 0406A, A8-652.
3128 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
3129 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
Steve Blockd0582a62009-12-15 09:54:21 +00003130 emit(cond | 0xE*B24 | 0xF*B20 | B16 |
3131 dst.code()*B12 | 0xA*B8 | B4);
3132}
3133
3134
Steve Block8defd9f2010-07-08 12:39:36 +01003135void Assembler::vsqrt(const DwVfpRegister dst,
3136 const DwVfpRegister src,
3137 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003138 // Instruction details available in ARM DDI 0406C.b, A8-1058.
3139 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
3140 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
3141 int vd, d;
3142 dst.split_code(&vd, &d);
3143 int vm, m;
3144 src.split_code(&vm, &m);
3145 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
3146 m*B5 | vm);
3147}
3148
3149
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003150void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
3151 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3152 // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3153 // M(5) | 0(4) | Vm(3-0)
3154 DCHECK(CpuFeatures::IsSupported(ARMv8));
3155 int vd, d;
3156 dst.split_code(&vd, &d);
3157 int vm, m;
3158 src.split_code(&vm, &m);
3159 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
3160 0x5 * B9 | B8 | B6 | m * B5 | vm);
3161}
3162
3163
3164void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
3165 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3166 // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3167 // M(5) | 0(4) | Vm(3-0)
3168 DCHECK(CpuFeatures::IsSupported(ARMv8));
3169 int vd, d;
3170 dst.split_code(&vd, &d);
3171 int vm, m;
3172 src.split_code(&vm, &m);
3173 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
3174 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3175}
3176
3177
3178void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
3179 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3180 // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3181 // M(5) | 0(4) | Vm(3-0)
3182 DCHECK(CpuFeatures::IsSupported(ARMv8));
3183 int vd, d;
3184 dst.split_code(&vd, &d);
3185 int vm, m;
3186 src.split_code(&vm, &m);
3187 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
3188 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3189}
3190
3191
3192void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
3193 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3194 // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3195 // M(5) | 0(4) | Vm(3-0)
3196 DCHECK(CpuFeatures::IsSupported(ARMv8));
3197 int vd, d;
3198 dst.split_code(&vd, &d);
3199 int vm, m;
3200 src.split_code(&vm, &m);
3201 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
3202 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3203}
3204
3205
3206void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
3207 const Condition cond) {
3208 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
3209 // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3210 DCHECK(CpuFeatures::IsSupported(ARMv8));
3211 int vd, d;
3212 dst.split_code(&vd, &d);
3213 int vm, m;
3214 src.split_code(&vm, &m);
3215 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
3216 0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
3217}
3218
3219
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003220// Support for NEON.
3221
3222void Assembler::vld1(NeonSize size,
3223 const NeonListOperand& dst,
3224 const NeonMemOperand& src) {
3225 // Instruction details available in ARM DDI 0406C.b, A8.8.320.
3226 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
3227 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3228 DCHECK(CpuFeatures::IsSupported(NEON));
3229 int vd, d;
3230 dst.base().split_code(&vd, &d);
3231 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
3232 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
3233}
3234
3235
3236void Assembler::vst1(NeonSize size,
3237 const NeonListOperand& src,
3238 const NeonMemOperand& dst) {
3239 // Instruction details available in ARM DDI 0406C.b, A8.8.404.
3240 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
3241 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3242 DCHECK(CpuFeatures::IsSupported(NEON));
3243 int vd, d;
3244 src.base().split_code(&vd, &d);
3245 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
3246 size*B6 | dst.align()*B4 | dst.rm().code());
3247}
3248
3249
3250void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
3251 // Instruction details available in ARM DDI 0406C.b, A8.8.346.
3252 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
3253 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
3254 DCHECK(CpuFeatures::IsSupported(NEON));
3255 int vd, d;
3256 dst.split_code(&vd, &d);
3257 int vm, m;
3258 src.split_code(&vm, &m);
3259 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
3260 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
Steve Block8defd9f2010-07-08 12:39:36 +01003261}
3262
3263
Andrei Popescu31002712010-02-23 13:46:05 +00003264// Pseudo instructions.
Steve Block6ded16b2010-05-10 14:33:55 +01003265void Assembler::nop(int type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003266 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
3267 // some of the CPU's pipeline and has to issue. Older ARM chips simply used
3268 // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
3269 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
3270 // a type.
3271 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
Steve Block6ded16b2010-05-10 14:33:55 +01003272 emit(al | 13*B21 | type*B12 | type);
3273}
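// Example: nop(0) emits 0xE1A00000 (mov r0, r0, the canonical ARM nop) and
// nop(1) emits 0xE1A01001 (mov r1, r1), so the marker type can be read back
// from either the Rd or the Rm field.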
3274
3275
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003276bool Assembler::IsMovT(Instr instr) {
3277 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3278 ((kNumRegisters-1)*B12) | // mask out register
3279 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3280 return instr == kMovtPattern;
3281}
3282
3283
3284bool Assembler::IsMovW(Instr instr) {
3285 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3286 ((kNumRegisters-1)*B12) | // mask out destination
3287 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3288 return instr == kMovwPattern;
3289}
3290
3291
3292Instr Assembler::GetMovTPattern() { return kMovtPattern; }
3293
3294
3295Instr Assembler::GetMovWPattern() { return kMovwPattern; }
3296
3297
3298Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
3299 DCHECK(immediate < 0x10000);
3300 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
3301}
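// Example: EncodeMovwImmediate(0xABCD) returns 0x000A0BCD, placing imm4 (0xA)
// in bits 19-16 and imm12 (0xBCD) in bits 11-0, matching the movw/movt
// immediate split.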
3302
3303
3304Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
3305 instruction &= ~EncodeMovwImmediate(0xffff);
3306 return instruction | EncodeMovwImmediate(immediate);
3307}
3308
3309
3310int Assembler::DecodeShiftImm(Instr instr) {
3311 int rotate = Instruction::RotateValue(instr) * 2;
3312 int immed8 = Instruction::Immed8Value(instr);
3313 return (immed8 >> rotate) | (immed8 << (32 - rotate));
3314}
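// Example: an ARM operand2 immediate with rotate field 4 and immed8 0xFF
// decodes to 0xFF rotated right by 8 bits, i.e. 0xFF000000; PatchShiftImm
// below performs the inverse, re-encoding a value via fits_shifter.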
3315
3316
3317Instr Assembler::PatchShiftImm(Instr instr, int immed) {
3318 uint32_t rotate_imm = 0;
3319 uint32_t immed_8 = 0;
3320 bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
3321 DCHECK(immed_fits);
3322 USE(immed_fits);
3323 return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
3324}
3325
3326
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003327bool Assembler::IsNop(Instr instr, int type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003328 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
Steve Block1e0659c2011-05-24 12:43:12 +01003329 // Check for mov rx, rx where x = type.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003330 return instr == (al | 13*B21 | type*B12 | type);
3331}
3332
3333
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003334bool Assembler::IsMovImmed(Instr instr) {
3335 return (instr & kMovImmedMask) == kMovImmedPattern;
3336}
3337
3338
3339bool Assembler::IsOrrImmed(Instr instr) {
3340 return (instr & kOrrImmedMask) == kOrrImmedPattern;
3341}
3342
3343
3344// static
Steve Blockd0582a62009-12-15 09:54:21 +00003345bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
3346 uint32_t dummy1;
3347 uint32_t dummy2;
3348 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
3349}
3350
3351
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003352bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3353 return is_uint12(abs(imm32));
3354}
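// I.e. addressing mode 2 (ldr/str) offsets fit when |imm32| <= 4095, the
// limit of the 12-bit unsigned offset field.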
3355
3356
Andrei Popescu31002712010-02-23 13:46:05 +00003357// Debugging.
Steve Blocka7e24c12009-10-30 11:49:00 +00003358void Assembler::RecordJSReturn() {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08003359 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00003360 CheckBuffer();
3361 RecordRelocInfo(RelocInfo::JS_RETURN);
3362}
3363
3364
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003365void Assembler::RecordDebugBreakSlot() {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08003366 positions_recorder()->WriteRecordedPositions();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003367 CheckBuffer();
3368 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
3369}
3370
3371
Steve Blocka7e24c12009-10-30 11:49:00 +00003372void Assembler::RecordComment(const char* msg) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01003373 if (FLAG_code_comments) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003374 CheckBuffer();
3375 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
3376 }
3377}
3378
3379
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003380void Assembler::RecordConstPool(int size) {
3381 // We only need this for debugger support, to correctly compute offsets in the
3382 // code.
3383 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
3384}
3385
3386
Steve Blocka7e24c12009-10-30 11:49:00 +00003387void Assembler::GrowBuffer() {
3388 if (!own_buffer_) FATAL("external code buffer is too small");
3389
Andrei Popescu31002712010-02-23 13:46:05 +00003390 // Compute new buffer size.
Steve Blocka7e24c12009-10-30 11:49:00 +00003391 CodeDesc desc; // the new buffer
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003392 if (buffer_size_ < 1 * MB) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003393 desc.buffer_size = 2*buffer_size_;
3394 } else {
3395 desc.buffer_size = buffer_size_ + 1*MB;
3396 }
3397 CHECK_GT(desc.buffer_size, 0); // no overflow
3398
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003399 // Set up new buffer.
Steve Blocka7e24c12009-10-30 11:49:00 +00003400 desc.buffer = NewArray<byte>(desc.buffer_size);
3401
3402 desc.instr_size = pc_offset();
3403 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
3404
Andrei Popescu31002712010-02-23 13:46:05 +00003405 // Copy the data.
Steve Blocka7e24c12009-10-30 11:49:00 +00003406 int pc_delta = desc.buffer - buffer_;
3407 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003408 MemMove(desc.buffer, buffer_, desc.instr_size);
3409 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
3410 desc.reloc_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003411
Andrei Popescu31002712010-02-23 13:46:05 +00003412 // Switch buffers.
Steve Blocka7e24c12009-10-30 11:49:00 +00003413 DeleteArray(buffer_);
3414 buffer_ = desc.buffer;
3415 buffer_size_ = desc.buffer_size;
3416 pc_ += pc_delta;
3417 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
3418 reloc_info_writer.last_pc() + pc_delta);
3419
Andrei Popescu31002712010-02-23 13:46:05 +00003420 // None of our relocation types are pc relative pointing outside the code
Steve Blocka7e24c12009-10-30 11:49:00 +00003421 // buffer or pc absolute pointing inside the code buffer, so there is no need
Andrei Popescu31002712010-02-23 13:46:05 +00003422 // to relocate any emitted relocation entries.
Steve Blocka7e24c12009-10-30 11:49:00 +00003423
Andrei Popescu31002712010-02-23 13:46:05 +00003424 // Relocate pending relocation entries.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003425 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3426 RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3427 DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
Steve Blocka7e24c12009-10-30 11:49:00 +00003428 rinfo.rmode() != RelocInfo::POSITION);
3429 if (rinfo.rmode() != RelocInfo::JS_RETURN) {
3430 rinfo.set_pc(rinfo.pc() + pc_delta);
3431 }
3432 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003433 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3434 RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3435 DCHECK(rinfo.rmode() == RelocInfo::NONE64);
3436 rinfo.set_pc(rinfo.pc() + pc_delta);
3437 }
3438 constant_pool_builder_.Relocate(pc_delta);
Steve Blocka7e24c12009-10-30 11:49:00 +00003439}
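// The growth policy above doubles the buffer while it is below 1 MB and then
// grows linearly by 1 MB per step, e.g. 256 KB -> 512 KB -> 1 MB -> 2 MB.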
3440
3441
Ben Murdochb0fe1622011-05-05 13:52:32 +01003442void Assembler::db(uint8_t data) {
Ben Murdochb8e0da22011-05-16 14:20:40 +01003443 // No relocation info should be pending while using db. db is used
3444 // to write pure data with no pointers and the constant pool should
3445 // be emitted before using db.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003446 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3447 DCHECK(num_pending_64_bit_reloc_info_ == 0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01003448 CheckBuffer();
3449 *reinterpret_cast<uint8_t*>(pc_) = data;
3450 pc_ += sizeof(uint8_t);
3451}
3452
3453
3454void Assembler::dd(uint32_t data) {
Ben Murdochb8e0da22011-05-16 14:20:40 +01003455 // No relocation info should be pending while using dd. dd is used
3456 // to write pure data with no pointers and the constant pool should
3457 // be emitted before using dd.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003458 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3459 DCHECK(num_pending_64_bit_reloc_info_ == 0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01003460 CheckBuffer();
3461 *reinterpret_cast<uint32_t*>(pc_) = data;
3462 pc_ += sizeof(uint32_t);
3463}
3464
3465
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003466void Assembler::emit_code_stub_address(Code* stub) {
3467 CheckBuffer();
3468 *reinterpret_cast<uint32_t*>(pc_) =
3469 reinterpret_cast<uint32_t>(stub->instruction_start());
3470 pc_ += sizeof(uint32_t);
3471}
3472
3473
Steve Blocka7e24c12009-10-30 11:49:00 +00003474void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003475 RelocInfo rinfo(pc_, rmode, data, NULL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003476 RecordRelocInfo(rinfo);
3477}
3478
3479
3480void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
3481 if (!RelocInfo::IsNone(rinfo.rmode())) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003482 // Don't record external references unless the heap will be serialized.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003483 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
3484 !serializer_enabled() && !emit_debug_code()) {
3485 return;
Steve Blocka7e24c12009-10-30 11:49:00 +00003486 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003487 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
3488 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
3489 RelocInfo reloc_info_with_ast_id(rinfo.pc(),
3490 rinfo.rmode(),
3491 RecordedAstId().ToInt(),
3492 NULL);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003493 ClearRecordedAstId();
Ben Murdoch257744e2011-11-30 15:57:28 +00003494 reloc_info_writer.Write(&reloc_info_with_ast_id);
3495 } else {
3496 reloc_info_writer.Write(&rinfo);
3497 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003498 }
3499}
3500
3501
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003502ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
3503 const RelocInfo& rinfo) {
3504 if (FLAG_enable_ool_constant_pool) {
3505 return constant_pool_builder_.AddEntry(this, rinfo);
3506 } else {
3507 if (rinfo.rmode() == RelocInfo::NONE64) {
3508 DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
3509 if (num_pending_64_bit_reloc_info_ == 0) {
3510 first_const_pool_64_use_ = pc_offset();
3511 }
3512 pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
3513 } else {
3514 DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
3515 if (num_pending_32_bit_reloc_info_ == 0) {
3516 first_const_pool_32_use_ = pc_offset();
3517 }
3518 pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
3519 }
3520 // Make sure the constant pool is not emitted in place of the next
3521 // instruction for which we just recorded relocation info.
3522 BlockConstPoolFor(1);
3523 return ConstantPoolArray::SMALL_SECTION;
3524 }
3525}
3526
3527
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003528void Assembler::BlockConstPoolFor(int instructions) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003529 if (FLAG_enable_ool_constant_pool) {
3530 // Should be a no-op if using an out-of-line constant pool.
3531 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3532 DCHECK(num_pending_64_bit_reloc_info_ == 0);
3533 return;
3534 }
3535
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003536 int pc_limit = pc_offset() + instructions * kInstrSize;
3537 if (no_const_pool_before_ < pc_limit) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003538 // Max pool start (if we need a jump and an alignment).
3539#ifdef DEBUG
3540 int start = pc_limit + kInstrSize + 2 * kPointerSize;
3541 DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
3542 (start - first_const_pool_32_use_ +
3543 num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
3544 DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
3545 (start - first_const_pool_64_use_ < kMaxDistToFPPool));
3546#endif
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003547 no_const_pool_before_ = pc_limit;
Steve Blocka7e24c12009-10-30 11:49:00 +00003548 }
3549
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003550 if (next_buffer_check_ < no_const_pool_before_) {
3551 next_buffer_check_ = no_const_pool_before_;
3552 }
3553}
Steve Blocka7e24c12009-10-30 11:49:00 +00003554
Steve Blocka7e24c12009-10-30 11:49:00 +00003555
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003556void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003557 if (FLAG_enable_ool_constant_pool) {
3558 // Should be a no-op if using an out-of-line constant pool.
3559 DCHECK(num_pending_32_bit_reloc_info_ == 0);
3560 DCHECK(num_pending_64_bit_reloc_info_ == 0);
3561 return;
3562 }
3563
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003564 // Some short sequences of instructions mustn't be broken up by constant pool
3565 // emission; such sequences are protected by calls to BlockConstPoolFor and
3566 // BlockConstPoolScope.
3567 if (is_const_pool_blocked()) {
Andrei Popescu31002712010-02-23 13:46:05 +00003568 // Something is wrong if emission is forced and blocked at the same time.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003569 DCHECK(!force_emit);
Steve Blocka7e24c12009-10-30 11:49:00 +00003570 return;
3571 }
3572
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003573 // There is nothing to do if there are no pending constant pool entries.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003574 if ((num_pending_32_bit_reloc_info_ == 0) &&
3575 (num_pending_64_bit_reloc_info_ == 0)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003576 // Calculate the offset of the next check.
3577 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
3578 return;
3579 }
3580
Steve Blocka7e24c12009-10-30 11:49:00 +00003581 // Check that the code buffer is large enough before emitting the constant
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003582 // pool (including the jump over the pool and the constant pool marker and
3583 // the gap to the relocation information).
3584 int jump_instr = require_jump ? kInstrSize : 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003585 int size_up_to_marker = jump_instr + kInstrSize;
3586 int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
3587 bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
3588 bool require_64_bit_align = false;
3589 if (has_fp_values) {
3590 require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
3591 if (require_64_bit_align) {
3592 size_after_marker += kInstrSize;
3593 }
3594 size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
3595 }
3596
3597 int size = size_up_to_marker + size_after_marker;
3598
3599 // We emit a constant pool when:
3600 // * requested to do so by parameter force_emit (e.g. after each function).
3601 // * the distance from the first instruction accessing the constant pool to
3602 // any of the constant pool entries will exceed its limit the next
3603 // time the pool is checked. This is overly restrictive, but we don't emit
3604 // constant pool entries in-order so it's conservatively correct.
3605 // * the instruction doesn't require a jump after itself to jump over the
3606 // constant pool, and we're getting close to running out of range.
3607 if (!force_emit) {
3608 DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
3609 bool need_emit = false;
3610 if (has_fp_values) {
3611 int dist64 = pc_offset() +
3612 size -
3613 num_pending_32_bit_reloc_info_ * kPointerSize -
3614 first_const_pool_64_use_;
3615 if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
3616 (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
3617 need_emit = true;
3618 }
3619 }
3620 int dist32 =
3621 pc_offset() + size - first_const_pool_32_use_;
3622 if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
3623 (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
3624 need_emit = true;
3625 }
3626 if (!need_emit) return;
3627 }
3628
3629 int needed_space = size + kGap;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003630 while (buffer_space() <= needed_space) GrowBuffer();
Steve Blocka7e24c12009-10-30 11:49:00 +00003631
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003632 {
3633 // Block recursive calls to CheckConstPool.
3634 BlockConstPoolScope block_const_pool(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003635 RecordComment("[ Constant Pool");
3636 RecordConstPool(size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003637
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003638 // Emit jump over constant pool if necessary.
3639 Label after_pool;
3640 if (require_jump) {
3641 b(&after_pool);
Steve Blocka7e24c12009-10-30 11:49:00 +00003642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003643
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003644 // Put down constant pool marker "Undefined instruction".
3645 // The data size helps disassembly know what to print.
3646 emit(kConstantPoolMarker |
3647 EncodeConstantPoolLength(size_after_marker / kPointerSize));
Steve Blocka7e24c12009-10-30 11:49:00 +00003648
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003649 if (require_64_bit_align) {
3650 emit(kConstantPoolMarker);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003651 }
3652
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003653 // Emit 64-bit constant pool entries first: their range is smaller than
3654 // that of 32-bit entries.
3655 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
3656 RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
3657
3658 DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
3659
3660 Instr instr = instr_at(rinfo.pc());
3661 // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
3662 DCHECK((IsVldrDPcImmediateOffset(instr) &&
3663 GetVldrDRegisterImmediateOffset(instr) == 0));
3664
3665 int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3666 DCHECK(is_uint10(delta));
3667
3668 bool found = false;
3669 uint64_t value = rinfo.raw_data64();
3670 for (int j = 0; j < i; j++) {
3671 RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
3672 if (value == rinfo2.raw_data64()) {
3673 found = true;
3674 DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
3675 Instr instr2 = instr_at(rinfo2.pc());
3676 DCHECK(IsVldrDPcImmediateOffset(instr2));
3677 delta = GetVldrDRegisterImmediateOffset(instr2);
3678 delta += rinfo2.pc() - rinfo.pc();
3679 break;
3680 }
3681 }
3682
3683 instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
3684
3685 if (!found) {
3686 uint64_t uint_data = rinfo.raw_data64();
3687 emit(uint_data & 0xFFFFFFFF);
3688 emit(uint_data >> 32);
3689 }
3690 }
3691
3692 // Emit 32-bit constant pool entries.
3693 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
3694 RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
3695 DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
3696 rinfo.rmode() != RelocInfo::POSITION &&
3697 rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
3698 rinfo.rmode() != RelocInfo::CONST_POOL &&
3699 rinfo.rmode() != RelocInfo::NONE64);
3700
3701 Instr instr = instr_at(rinfo.pc());
3702
3703 // 64-bit loads shouldn't get here.
3704 DCHECK(!IsVldrDPcImmediateOffset(instr));
3705
3706 if (IsLdrPcImmediateOffset(instr) &&
3707 GetLdrRegisterImmediateOffset(instr) == 0) {
3708 int delta = pc_ - rinfo.pc() - kPcLoadDelta;
3709 DCHECK(is_uint12(delta));
3710 // 0 is the smallest delta:
3711 // ldr rd, [pc, #0]
3712 // constant pool marker
3713 // data
3714
3715 bool found = false;
3716 if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
3717 for (int j = 0; j < i; j++) {
3718 RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
3719
3720 if ((rinfo2.data() == rinfo.data()) &&
3721 (rinfo2.rmode() == rinfo.rmode())) {
3722 Instr instr2 = instr_at(rinfo2.pc());
3723 if (IsLdrPcImmediateOffset(instr2)) {
3724 delta = GetLdrRegisterImmediateOffset(instr2);
3725 delta += rinfo2.pc() - rinfo.pc();
3726 found = true;
3727 break;
3728 }
3729 }
3730 }
3731 }
3732
3733 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
3734
3735 if (!found) {
3736 emit(rinfo.data());
3737 }
3738 } else {
3739 DCHECK(IsMovW(instr));
3740 }
3741 }
3742
3743 num_pending_32_bit_reloc_info_ = 0;
3744 num_pending_64_bit_reloc_info_ = 0;
3745 first_const_pool_32_use_ = -1;
3746 first_const_pool_64_use_ = -1;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003747
3748 RecordComment("]");
3749
3750 if (after_pool.is_linked()) {
3751 bind(&after_pool);
3752 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003753 }
3754
3755 // Since a constant pool was just emitted, move the check offset forward by
3756 // the standard interval.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003757 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
Steve Blocka7e24c12009-10-30 11:49:00 +00003758}
3759
3760
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003761Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
3762 if (!FLAG_enable_ool_constant_pool) {
3763 return isolate->factory()->empty_constant_pool_array();
3764 }
3765 return constant_pool_builder_.New(isolate);
3766}
3767
3768
3769void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
3770 constant_pool_builder_.Populate(this, constant_pool);
3771}
3772
3773
3774ConstantPoolBuilder::ConstantPoolBuilder()
3775 : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
3776
3777
3778bool ConstantPoolBuilder::IsEmpty() {
3779 return entries_.size() == 0;
3780}
3781
3782
3783ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
3784 RelocInfo::Mode rmode) {
3785 if (rmode == RelocInfo::NONE64) {
3786 return ConstantPoolArray::INT64;
3787 } else if (!RelocInfo::IsGCRelocMode(rmode)) {
3788 return ConstantPoolArray::INT32;
3789 } else if (RelocInfo::IsCodeTarget(rmode)) {
3790 return ConstantPoolArray::CODE_PTR;
3791 } else {
3792 DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
3793 return ConstantPoolArray::HEAP_PTR;
3794 }
3795}
3796
3797
3798ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
3799 Assembler* assm, const RelocInfo& rinfo) {
3800 RelocInfo::Mode rmode = rinfo.rmode();
3801 DCHECK(rmode != RelocInfo::COMMENT &&
3802 rmode != RelocInfo::POSITION &&
3803 rmode != RelocInfo::STATEMENT_POSITION &&
3804 rmode != RelocInfo::CONST_POOL);
3805
3806 // Try to merge entries which won't be patched.
3807 int merged_index = -1;
3808 ConstantPoolArray::LayoutSection entry_section = current_section_;
3809 if (RelocInfo::IsNone(rmode) ||
3810 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
3811 size_t i;
3812 std::vector<ConstantPoolEntry>::const_iterator it;
3813 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
3814 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
3815 // Merge with found entry.
3816 merged_index = i;
3817 entry_section = entries_[i].section_;
3818 break;
3819 }
3820 }
3821 }
3822 DCHECK(entry_section <= current_section_);
3823 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
3824
3825 if (merged_index == -1) {
3826 // Not merged, so update the appropriate count.
3827 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
3828 }
3829
3830 // Check if we still have room for another entry in the small section
3831 // given Arm's ldr and vldr immediate offset range.
3832 if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
3833 !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
3834 is_uint10(ConstantPoolArray::MaxInt64Offset(
3835 small_entries()->count_of(ConstantPoolArray::INT64))))) {
3836 current_section_ = ConstantPoolArray::EXTENDED_SECTION;
3837 }
3838 return entry_section;
3839}
3840
3841
3842void ConstantPoolBuilder::Relocate(int pc_delta) {
3843 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
3844 entry != entries_.end(); entry++) {
3845 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
3846 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
3847 }
3848}
3849
3850
3851Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
3852 if (IsEmpty()) {
3853 return isolate->factory()->empty_constant_pool_array();
3854 } else if (extended_entries()->is_empty()) {
3855 return isolate->factory()->NewConstantPoolArray(*small_entries());
3856 } else {
3857 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
3858 return isolate->factory()->NewExtendedConstantPoolArray(
3859 *small_entries(), *extended_entries());
3860 }
3861}
3862
3863
3864void ConstantPoolBuilder::Populate(Assembler* assm,
3865 ConstantPoolArray* constant_pool) {
3866 DCHECK_EQ(extended_entries()->is_empty(),
3867 !constant_pool->is_extended_layout());
3868 DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
3869 constant_pool, ConstantPoolArray::SMALL_SECTION)));
3870 if (constant_pool->is_extended_layout()) {
3871 DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
3872 constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
3873 }
3874
3875 // Set up initial offsets.
3876 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
3877 [ConstantPoolArray::NUMBER_OF_TYPES];
3878 for (int section = 0; section <= constant_pool->final_section(); section++) {
3879 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
3880 ? small_entries()->total_count()
3881 : 0;
3882 for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
3883 ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
3884 if (number_of_entries_[section].count_of(type) != 0) {
3885 offsets[section][type] = constant_pool->OffsetOfElementAt(
3886 number_of_entries_[section].base_of(type) + section_start);
3887 }
3888 }
3889 }
3890
3891 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
3892 entry != entries_.end(); entry++) {
3893 RelocInfo rinfo = entry->rinfo_;
3894 RelocInfo::Mode rmode = entry->rinfo_.rmode();
3895 ConstantPoolArray::Type type = GetConstantPoolType(rmode);
3896
3897 // Update constant pool if necessary and get the entry's offset.
3898 int offset;
3899 if (entry->merged_index_ == -1) {
3900 offset = offsets[entry->section_][type];
3901 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
3902 if (type == ConstantPoolArray::INT64) {
3903 constant_pool->set_at_offset(offset, rinfo.data64());
3904 } else if (type == ConstantPoolArray::INT32) {
3905 constant_pool->set_at_offset(offset,
3906 static_cast<int32_t>(rinfo.data()));
3907 } else if (type == ConstantPoolArray::CODE_PTR) {
3908 constant_pool->set_at_offset(offset,
3909 reinterpret_cast<Address>(rinfo.data()));
3910 } else {
3911 DCHECK(type == ConstantPoolArray::HEAP_PTR);
3912 constant_pool->set_at_offset(offset,
3913 reinterpret_cast<Object*>(rinfo.data()));
3914 }
3915 offset -= kHeapObjectTag;
3916 entry->merged_index_ = offset; // Stash offset for merged entries.
3917 } else {
3918 DCHECK(entry->merged_index_ < (entry - entries_.begin()));
3919 offset = entries_[entry->merged_index_].merged_index_;
3920 }
3921
3922 // Patch vldr/ldr instruction with correct offset.
3923 Instr instr = assm->instr_at(rinfo.pc());
3924 if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
3925 if (CpuFeatures::IsSupported(ARMv7)) {
3926 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
3927 Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
3928 DCHECK((Assembler::IsMovW(instr) &&
3929 Instruction::ImmedMovwMovtValue(instr) == 0));
3930 DCHECK((Assembler::IsMovT(next_instr) &&
3931 Instruction::ImmedMovwMovtValue(next_instr) == 0));
3932 assm->instr_at_put(
3933 rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
3934 assm->instr_at_put(
3935 rinfo.pc() + Assembler::kInstrSize,
3936 Assembler::PatchMovwImmediate(next_instr, offset >> 16));
3937 } else {
3938 // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
3939 Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
3940 Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
3941 Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
3942 DCHECK((Assembler::IsMovImmed(instr) &&
3943 Instruction::Immed8Value(instr) == 0));
3944 DCHECK((Assembler::IsOrrImmed(instr_2) &&
3945 Instruction::Immed8Value(instr_2) == 0) &&
3946 Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
3947 DCHECK((Assembler::IsOrrImmed(instr_3) &&
3948 Instruction::Immed8Value(instr_3) == 0) &&
3949 Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
3950 DCHECK((Assembler::IsOrrImmed(instr_4) &&
3951 Instruction::Immed8Value(instr_4) == 0) &&
3952 Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
3953 assm->instr_at_put(
3954 rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
3955 assm->instr_at_put(
3956 rinfo.pc() + Assembler::kInstrSize,
3957 Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
3958 assm->instr_at_put(
3959 rinfo.pc() + 2 * Assembler::kInstrSize,
3960 Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
3961 assm->instr_at_put(
3962 rinfo.pc() + 3 * Assembler::kInstrSize,
3963 Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
3964 }
3965 } else if (type == ConstantPoolArray::INT64) {
3966 // Instruction to patch must be 'vldr rd, [pp, #0]'.
3967 DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
3968 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
3969 DCHECK(is_uint10(offset));
3970 assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
3971 instr, offset));
3972 } else {
3973 // Instruction to patch must be 'ldr rd, [pp, #0]'.
3974 DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
3975 Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
3976 DCHECK(is_uint12(offset));
3977 assm->instr_at_put(
3978 rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
3979 }
3980 }
3981}
3982
3983
Steve Blocka7e24c12009-10-30 11:49:00 +00003984} } // namespace v8::internal
Leon Clarkef7060e22010-06-03 12:02:55 +01003985
3986#endif // V8_TARGET_ARCH_ARM