// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/arm/assembler-arm.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/assembler-arm-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV8_INSTRUCTIONS
  if (FLAG_enable_armv8) {
    answer |= 1u << ARMv8;
    // ARMv8 always features VFP and NEON.
    answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
    answer |= 1u << SUDIV | 1u << MLS;
  }
#endif  // CAN_USE_ARMV8_INSTRUCTIONS
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
#endif  // CAN_USE_VFP32DREGS
#ifdef CAN_USE_NEON
  if (FLAG_enable_neon) answer |= 1u << NEON;
#endif  // CAN_USE_NEON
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  dcache_line_size_ = 64;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifndef __arm__
  // For the simulator build, use whatever the flags specify.
  if (FLAG_enable_armv8) {
    supported_ |= 1u << ARMv8;
    // ARMv8 always features VFP and NEON.
    supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
    supported_ |= 1u << SUDIV | 1u << MLS;
    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
  }
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
    if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
    if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
    if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
  }
  if (FLAG_enable_mls) supported_ |= 1u << MLS;
  if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;

#else  // __arm__
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
  if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;

  if (cpu.architecture() >= 7) {
    if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
    if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
      supported_ |= 1u << ARMv8;
    }
    if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
    // Use movw/movt for QUALCOMM ARMv7 cores.
    if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
      supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    }
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == base::CPU::ARM &&
      (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
       cpu.part() == base::CPU::ARM_CORTEX_A9)) {
    dcache_line_size_ = 32;
  }

  if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;

  if (cpu.implementer() == base::CPU::NVIDIA &&
      cpu.variant() == base::CPU::NVIDIA_DENVER &&
      cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
    // TODO(jkummerow): This is turned off as an experiment to see if it
    // affects crash rates. Keep an eye on crash reports and either remove
    // coherent cache support permanently, or re-enable it!
    // supported_ |= 1u << COHERENT_CACHE;
  }
#endif

  DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
}


void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_target_type = "";
  const char* arm_no_probe = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if !defined __arm__
  arm_target_type = " simulator";
#endif

#if defined ARM_TEST_NO_FEATURE_PROBE
  arm_no_probe = " noprobe";
#endif

#if defined CAN_USE_ARMV8_INSTRUCTIONS
  arm_arch = "arm v8";
#elif defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#if defined CAN_USE_NEON
  arm_fpu = " neon";
#elif defined CAN_USE_VFP3_INSTRUCTIONS
# if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp3-d16";
# endif
#else
  arm_fpu = " vfp2";
#endif

#ifdef __arm__
  arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
#elif USE_EABI_HARDFLOAT
  arm_float_abi = "hard";
#else
  arm_float_abi = "softfp";
#endif

#if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
#endif

  printf("target%s%s %s%s%s %s\n",
         arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
         arm_float_abi);
}


void CpuFeatures::PrintFeatures() {
  printf(
      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d "
216 "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
217 CpuFeatures::IsSupported(ARMv8),
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000218 CpuFeatures::IsSupported(ARMv7),
219 CpuFeatures::IsSupported(VFP3),
220 CpuFeatures::IsSupported(VFP32DREGS),
221 CpuFeatures::IsSupported(NEON),
222 CpuFeatures::IsSupported(SUDIV),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000223 CpuFeatures::IsSupported(MLS),
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000224 CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400225 CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
226 CpuFeatures::IsSupported(COHERENT_CACHE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000227#ifdef __arm__
228 bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
229#elif USE_EABI_HARDFLOAT
230 bool eabi_hardfloat = true;
231#else
232 bool eabi_hardfloat = false;
233#endif
234 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
235}
236
237
238// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +0000239// Implementation of RelocInfo
240
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000241// static
Steve Blocka7e24c12009-10-30 11:49:00 +0000242const int RelocInfo::kApplyMask = 0;
243
244
Leon Clarkef7060e22010-06-03 12:02:55 +0100245bool RelocInfo::IsCodedSpecially() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000246 // The deserializer needs to know whether a pointer is specially coded.  Being
247 // specially coded on ARM means that it is a movw/movt instruction, or is an
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000248 // embedded constant pool entry.  These only occur if
249 // FLAG_enable_embedded_constant_pool is true.
250 return FLAG_enable_embedded_constant_pool;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000251}
252
253
254bool RelocInfo::IsInConstantPool() {
255 return Assembler::is_constant_pool_load(pc_);
Leon Clarkef7060e22010-06-03 12:02:55 +0100256}
257
258
Steve Blocka7e24c12009-10-30 11:49:00 +0000259// -----------------------------------------------------------------------------
260// Implementation of Operand and MemOperand
261// See assembler-arm-inl.h for inlined constructors
262
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  DCHECK(is_uint5(shift_imm));

  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;

  if ((shift_op == ROR) && (shift_imm == 0)) {
    // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
    // RRX as ROR #0 (See below).
    shift_op = LSL;
  } else if (shift_op == RRX) {
    // encoded as ROR with shift_imm == 0
    DCHECK(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  DCHECK(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}


MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;
  offset_ = offset;
  am_ = am;

  // Accesses below the stack pointer are not safe, and are prohibited by the
  // ABI. We can check obvious violations here.
  if (rn.is(sp)) {
    if (am == Offset) DCHECK_LE(0, offset);
    if (am == NegOffset) DCHECK_GE(0, offset);
  }
}


MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;
  shift_imm_ = 0;
  am_ = am;
}


MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  DCHECK(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}


NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  DCHECK((am == Offset) || (am == PostIndex));
  rn_ = rn;
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}


void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:
      align_ = 0;
      break;
    case 64:
      align_ = 1;
      break;
    case 128:
      align_ = 2;
      break;
    case 256:
      align_ = 3;
      break;
    default:
      UNREACHABLE();
      align_ = 0;
      break;
  }
}


NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  switch (registers_count) {
    case 1:
      type_ = nlt_1;
      break;
    case 2:
      type_ = nlt_2;
      break;
    case 3:
      type_ = nlt_3;
      break;
    case 4:
      type_ = nlt_4;
      break;
    default:
      UNREACHABLE();
      type_ = nlt_1;
      break;
  }
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
// ldr rd, [pp, #offset]
const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpImmedPattern = 5 * B24 | L | Register::kCode_r8 * B16;
// ldr rd, [pp, rn]
const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpRegPattern = 7 * B24 | L | Register::kCode_r8 * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | Register::kCode_r8 * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | Register::kCode_fp * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | Register::kCode_fp * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | Register::kCode_fp * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      pending_32_bit_constants_(&pending_32_bit_constants_buffer_[0]),
      pending_64_bit_constants_(&pending_64_bit_constants_buffer_[0]),
      constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_constants_ = 0;
  num_pending_64_bit_constants_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  ClearRecordedAstId();
}


Assembler::~Assembler() {
  DCHECK(const_pool_blocked_nesting_ == 0);
  if (pending_32_bit_constants_ != &pending_32_bit_constants_buffer_[0]) {
    delete[] pending_32_bit_constants_;
  }
  if (pending_64_bit_constants_ != &pending_64_bit_constants_buffer_[0]) {
    delete[] pending_64_bit_constants_;
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  reloc_info_writer.Finish();

  // Emit constant pool if necessary.
  int constant_pool_offset = 0;
  if (FLAG_enable_embedded_constant_pool) {
    constant_pool_offset = EmitEmbeddedConstantPool();
  } else {
    CheckConstPool(true, false);
    DCHECK(num_pending_32_bit_constants_ == 0);
    DCHECK(num_pending_64_bit_constants_ == 0);
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}


bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes.
  return ((instr & kImm24Mask) << 8) >> 6;
}


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  offset <<= 2;
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  DCHECK((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsAddRegisterImmediate(instr));
  DCHECK(offset >= 0);
  DCHECK(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RdValue(instr);
  return reg;
}


Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RnValue(instr);
  return reg;
}


Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RmValue(instr);
  return reg;
}


Instr Assembler::GetConsantPoolLoadPattern() {
  if (FLAG_enable_embedded_constant_pool) {
    return kLdrPpImmedPattern;
  } else {
    return kLdrPCImmedPattern;
  }
}


Instr Assembler::GetConsantPoolLoadMask() {
  if (FLAG_enable_embedded_constant_pool) {
    return kLdrPpImmedMask;
  } else {
    return kLdrPCImmedMask;
  }
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
}


bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp +/- offset_12].
  return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
}


bool Assembler::IsLdrPpRegOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp, +/- <Rm>].
  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
}


Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}


bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;
}


bool Assembler::IsBlxReg(Instr instr) {
  // Check the instruction is indeed a
  // blxcc <Rm>
  return (instr & kBlxRegMask) == kBlxRegPattern;
}


bool Assembler::IsBlxIp(Instr instr) {
  // Check the instruction is indeed a
  // blx ip
  return instr == kBlxIp;
}


bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRn(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.
//
// The linked labels form a link chain by making the branch offset
// in the instruction stream point to the previous branch
// instruction using the same label.
//
// The link chain is terminated by a branch offset pointing to the
// same position.


int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  DCHECK_EQ(5 * B25, instr & 7 * B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}


void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    DCHECK(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    DCHECK(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              1, CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              2, CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              2, CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              3, CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  DCHECK_EQ(5 * B25, instr & 7 * B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    DCHECK_EQ(0, imm26 & 1);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
  } else {
    DCHECK_EQ(0, imm26 & 3);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  DCHECK(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
// to match them!
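// An encodable immediate is an 8-bit value rotated right by an even number of
// bits (2 * rotate_imm); for example, 0x3FC is 0xFF rotated right by 30.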
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= Assembler::EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}


// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return assembler->serializer_enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


static bool use_mov_immediate_load(const Operand& x,
                                   const Assembler* assembler) {
  if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
      !assembler->is_constant_pool_available()) {
    return true;
  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
             (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
    return true;
  } else if (x.must_output_reloc_info(assembler)) {
    // Prefer constant pool if data is likely to be patched.
    return false;
  } else {
    // Otherwise, use immediate load if movw / movt is available.
    return CpuFeatures::IsSupported(ARMv7);
  }
}


int Operand::instructions_required(const Assembler* assembler,
                                   Instr instr) const {
  if (rm_.is_valid()) return 1;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required.  First account for the instructions required
    // for the constant pool or immediate load
    int instructions;
    if (use_mov_immediate_load(*this, assembler)) {
      // A movw / movt or mov / orr immediate load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
    } else if (assembler != NULL &&
               assembler->ConstantPoolAccessIsInOverflow()) {
      // An overflowed constant pool load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
    } else {
      // A small constant pool load.
      instructions = 1;
    }

    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
      // For a mov or mvn instruction which doesn't set the condition
      // code, the constant pool or immediate load is enough, otherwise we need
      // to account for the actual instruction being requested.
      instructions += 1;
    }
    return instructions;
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return 1;
  }
}


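// Load a 32-bit immediate into rd using either a movw/movt pair, a
// mov/orr/orr/orr sequence, or a constant pool load, depending on the
// available CPU features and flags.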
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(x.rmode_);
  }

  if (use_mov_immediate_load(x, this)) {
    Register target = rd.code() == pc.code() ? ip : rd;
    if (CpuFeatures::IsSupported(ARMv7)) {
      if (!FLAG_enable_embedded_constant_pool &&
          x.must_output_reloc_info(this)) {
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      movw(target, imm32 & 0xffff, cond);
      movt(target, imm32 >> 16, cond);
    } else {
      DCHECK(FLAG_enable_embedded_constant_pool);
      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
    }
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
    ConstantPoolEntry::Access access =
        ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
    if (access == ConstantPoolEntry::OVERFLOWED) {
      DCHECK(FLAG_enable_embedded_constant_pool);
      Register target = rd.code() == pc.code() ? ip : rd;
      // Emit instructions to load constant pool offset.
      if (CpuFeatures::IsSupported(ARMv7)) {
        movw(target, 0, cond);
        movt(target, 0, cond);
      } else {
        mov(target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
      }
      // Load from constant pool at offset.
      ldr(rd, MemOperand(pp, target), cond);
    } else {
      DCHECK(access == ConstantPoolEntry::REGULAR);
      ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
          cond);
    }
  }
}


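// Emit a data-processing instruction (ARM addressing mode 1); the operand is
// an immediate, an immediate-shifted register, or a register-shifted register.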
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}


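// Emit a word/byte load or store (ARM addressing mode 2) with a 12-bit
// immediate offset or a (possibly shifted) register offset.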
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset; the constructors make sure that both shift_imm_
    // and shift_op_ are initialized.
    DCHECK(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}


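// Emit a halfword, signed byte or doubleword transfer (ARM addressing mode 3)
// with an 8-bit immediate offset or a plain register offset.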
1260void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001261 DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
1262 DCHECK(x.rn_.is_valid());
Steve Blocka7e24c12009-10-30 11:49:00 +00001263 int am = x.am_;
1264 if (!x.rm_.is_valid()) {
Andrei Popescu31002712010-02-23 13:46:05 +00001265 // Immediate offset.
Steve Blocka7e24c12009-10-30 11:49:00 +00001266 int offset_8 = x.offset_;
1267 if (offset_8 < 0) {
1268 offset_8 = -offset_8;
1269 am ^= U;
1270 }
1271 if (!is_uint8(offset_8)) {
Andrei Popescu31002712010-02-23 13:46:05 +00001272 // Immediate offset cannot be encoded, so load it into register ip first.
 1273 // rn (and rd in a load) must never be ip, or it will be trashed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001274 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
Steve Block1e0659c2011-05-24 12:43:12 +01001275 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
Steve Blocka7e24c12009-10-30 11:49:00 +00001276 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
1277 return;
1278 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001279 DCHECK(offset_8 >= 0); // no masking needed
Steve Blocka7e24c12009-10-30 11:49:00 +00001280 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
1281 } else if (x.shift_imm_ != 0) {
Andrei Popescu31002712010-02-23 13:46:05 +00001282 // Scaled register offset is not supported, so load the index first.
 1283 // rn (and rd in a load) must never be ip, or it will be trashed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001284 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
Steve Blocka7e24c12009-10-30 11:49:00 +00001285 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Steve Block1e0659c2011-05-24 12:43:12 +01001286 Instruction::ConditionField(instr));
Steve Blocka7e24c12009-10-30 11:49:00 +00001287 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
1288 return;
1289 } else {
Andrei Popescu31002712010-02-23 13:46:05 +00001290 // Register offset.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001291 DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
Steve Blocka7e24c12009-10-30 11:49:00 +00001292 instr |= x.rm_.code();
1293 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001294 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
Steve Blocka7e24c12009-10-30 11:49:00 +00001295 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
1296}
1297
1298
1299void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001300 DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
1301 DCHECK(rl != 0);
1302 DCHECK(!rn.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001303 emit(instr | rn.code()*B16 | rl);
1304}
1305
1306
1307void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
Andrei Popescu31002712010-02-23 13:46:05 +00001308 // Unindexed addressing is not encoded by this function.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001309 DCHECK_EQ((B27 | B26),
Steve Block1e0659c2011-05-24 12:43:12 +01001310 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001311 DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
Steve Blocka7e24c12009-10-30 11:49:00 +00001312 int am = x.am_;
1313 int offset_8 = x.offset_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001314 DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset
Steve Blocka7e24c12009-10-30 11:49:00 +00001315 offset_8 >>= 2;
1316 if (offset_8 < 0) {
1317 offset_8 = -offset_8;
1318 am ^= U;
1319 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001320 DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
1321 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
Steve Blocka7e24c12009-10-30 11:49:00 +00001322
Andrei Popescu31002712010-02-23 13:46:05 +00001323 // Post-indexed addressing requires W == 1; this differs from addrmod2/3.
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 if ((am & P) == 0)
1325 am |= W;
1326
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001327 DCHECK(offset_8 >= 0); // no masking needed
Steve Blocka7e24c12009-10-30 11:49:00 +00001328 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
1329}
1330
1331
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001332int Assembler::branch_offset(Label* L) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001333 int target_pos;
1334 if (L->is_bound()) {
1335 target_pos = L->pos();
1336 } else {
1337 if (L->is_linked()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001338 // Point to previous instruction that uses the link.
1339 target_pos = L->pos();
Steve Blocka7e24c12009-10-30 11:49:00 +00001340 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001341 // First entry of the link chain points to itself.
1342 target_pos = pc_offset();
Steve Blocka7e24c12009-10-30 11:49:00 +00001343 }
1344 L->link_to(pc_offset());
1345 }
1346
1347 // Block the emission of the constant pool, since the branch instruction must
Andrei Popescu31002712010-02-23 13:46:05 +00001348 // be emitted at the pc offset recorded by the label.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001349 if (!is_const_pool_blocked()) BlockConstPoolFor(1);
1350
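  // Note (assumes the usual ARM pipeline behaviour): pc reads as the address
  // of the current instruction plus 8, and kPcLoadDelta compensates for that
  // lookahead when computing the encoded offset.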
Steve Blocka7e24c12009-10-30 11:49:00 +00001351 return target_pos - (pc_offset() + kPcLoadDelta);
1352}
1353
1354
Andrei Popescu31002712010-02-23 13:46:05 +00001355// Branch instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001356void Assembler::b(int branch_offset, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001357 DCHECK((branch_offset & 3) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001358 int imm24 = branch_offset >> 2;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001359 CHECK(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001360 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001361
Steve Block6ded16b2010-05-10 14:33:55 +01001362 if (cond == al) {
Andrei Popescu31002712010-02-23 13:46:05 +00001363 // Dead code is a good location to emit the constant pool.
Steve Blocka7e24c12009-10-30 11:49:00 +00001364 CheckConstPool(false, false);
Steve Block6ded16b2010-05-10 14:33:55 +01001365 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001366}
1367
1368
1369void Assembler::bl(int branch_offset, Condition cond) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001370 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001371 DCHECK((branch_offset & 3) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001372 int imm24 = branch_offset >> 2;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001373 CHECK(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001374 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001375}
1376
1377
1378void Assembler::blx(int branch_offset) { // v5 and above
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001379 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001380 DCHECK((branch_offset & 1) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001381 int h = ((branch_offset & 2) >> 1)*B24;
1382 int imm24 = branch_offset >> 2;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001383 CHECK(is_int24(imm24));
Steve Block1e0659c2011-05-24 12:43:12 +01001384 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
Steve Blocka7e24c12009-10-30 11:49:00 +00001385}
1386
1387
1388void Assembler::blx(Register target, Condition cond) { // v5 and above
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001389 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001390 DCHECK(!target.is(pc));
Steve Block1e0659c2011-05-24 12:43:12 +01001391 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001392}
1393
1394
1395void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001396 positions_recorder()->WriteRecordedPositions();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001397 DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
Steve Block1e0659c2011-05-24 12:43:12 +01001398 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001399}
1400
1401
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001402void Assembler::b(Label* L, Condition cond) {
1403 CheckBuffer();
1404 b(branch_offset(L), cond);
1405}
1406
1407
1408void Assembler::bl(Label* L, Condition cond) {
1409 CheckBuffer();
1410 bl(branch_offset(L), cond);
1411}
1412
1413
1414void Assembler::blx(Label* L) {
1415 CheckBuffer();
1416 blx(branch_offset(L));
1417}
1418
1419
Andrei Popescu31002712010-02-23 13:46:05 +00001420// Data-processing instructions.
1421
Steve Blocka7e24c12009-10-30 11:49:00 +00001422void Assembler::and_(Register dst, Register src1, const Operand& src2,
1423 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001424 addrmod1(cond | AND | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001425}
1426
1427
1428void Assembler::eor(Register dst, Register src1, const Operand& src2,
1429 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001430 addrmod1(cond | EOR | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001431}
1432
1433
1434void Assembler::sub(Register dst, Register src1, const Operand& src2,
1435 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001436 addrmod1(cond | SUB | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001437}
1438
1439
1440void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1441 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001442 addrmod1(cond | RSB | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001443}
1444
1445
1446void Assembler::add(Register dst, Register src1, const Operand& src2,
1447 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001448 addrmod1(cond | ADD | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001449}
1450
1451
1452void Assembler::adc(Register dst, Register src1, const Operand& src2,
1453 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001454 addrmod1(cond | ADC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001455}
1456
1457
1458void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1459 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001460 addrmod1(cond | SBC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001461}
1462
1463
1464void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1465 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001466 addrmod1(cond | RSC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001467}
1468
1469
1470void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001471 addrmod1(cond | TST | S, src1, r0, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001472}
1473
1474
1475void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001476 addrmod1(cond | TEQ | S, src1, r0, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001477}
1478
1479
1480void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001481 addrmod1(cond | CMP | S, src1, r0, src2);
1482}
1483
1484
1485void Assembler::cmp_raw_immediate(
1486 Register src, int raw_immediate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001487 DCHECK(is_uint12(raw_immediate));
Steve Block1e0659c2011-05-24 12:43:12 +01001488 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
Steve Blocka7e24c12009-10-30 11:49:00 +00001489}
1490
1491
1492void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001493 addrmod1(cond | CMN | S, src1, r0, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001494}
1495
1496
1497void Assembler::orr(Register dst, Register src1, const Operand& src2,
1498 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001499 addrmod1(cond | ORR | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001500}
1501
1502
1503void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
1504 if (dst.is(pc)) {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001505 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00001506 }
Steve Block6ded16b2010-05-10 14:33:55 +01001507 // Don't allow nop instructions in the form mov rn, rn to be generated using
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001508 // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
1509 // or MarkCode(int/NopMarkerTypes) pseudo instructions.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001510 DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
Steve Block1e0659c2011-05-24 12:43:12 +01001511 addrmod1(cond | MOV | s, r0, dst, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001512}
1513
1514
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001515void Assembler::mov_label_offset(Register dst, Label* label) {
1516 if (label->is_bound()) {
1517 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
1518 } else {
1519 // Emit the link to the label in the code stream followed by extra nop
1520 // instructions.
1521 // If the label is not linked, then start a new link chain by linking it to
1522 // itself, emitting pc_offset().
1523 int link = label->is_linked() ? label->pos() : pc_offset();
1524 label->link_to(pc_offset());
1525
1526 // When the label is bound, these instructions will be patched with a
1527 // sequence of movw/movt or mov/orr/orr instructions. They will load the
1528 // destination register with the position of the label from the beginning
1529 // of the code.
1530 //
1531 // The link will be extracted from the first instruction and the destination
1532 // register from the second.
1533 // For ARMv7:
1534 // link
1535 // mov dst, dst
1536 // For ARMv6:
1537 // link
1538 // mov dst, dst
1539 // mov dst, dst
1540 //
1541 // When the label gets bound: target_at extracts the link and target_at_put
1542 // patches the instructions.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001543 CHECK(is_uint24(link));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001544 BlockConstPoolScope block_const_pool(this);
1545 emit(link);
1546 nop(dst.code());
1547 if (!CpuFeatures::IsSupported(ARMv7)) {
1548 nop(dst.code());
1549 }
1550 }
1551}
1552
1553
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001554void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001555 DCHECK(CpuFeatures::IsSupported(ARMv7));
1556 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001557}
1558
1559
1560void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001561 DCHECK(CpuFeatures::IsSupported(ARMv7));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01001562 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1563}
1564
1565
Steve Blocka7e24c12009-10-30 11:49:00 +00001566void Assembler::bic(Register dst, Register src1, const Operand& src2,
1567 SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001568 addrmod1(cond | BIC | s, src1, dst, src2);
Steve Blocka7e24c12009-10-30 11:49:00 +00001569}
1570
1571
1572void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001573 addrmod1(cond | MVN | s, r0, dst, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001574}
1575
1576
Andrei Popescu31002712010-02-23 13:46:05 +00001577// Multiply instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001578void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1579 SBit s, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001580 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001581 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1582 src2.code()*B8 | B7 | B4 | src1.code());
1583}
1584
1585
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001586void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1587 Condition cond) {
1588 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1589 DCHECK(IsEnabled(MLS));
1590 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1591 src2.code()*B8 | B7 | B4 | src1.code());
1592}
1593
1594
1595void Assembler::sdiv(Register dst, Register src1, Register src2,
1596 Condition cond) {
1597 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1598 DCHECK(IsEnabled(SUDIV));
1599 emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
1600 src2.code()*B8 | B4 | src1.code());
1601}
1602
1603
1604void Assembler::udiv(Register dst, Register src1, Register src2,
1605 Condition cond) {
1606 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1607 DCHECK(IsEnabled(SUDIV));
1608 emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
1609 src2.code() * B8 | B4 | src1.code());
1610}
1611
1612
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001613void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
1614 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001615 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001616 // dst goes in bits 16-19 for this instruction!
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001617 emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
1618}
1619
1620
1621void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
1622 Condition cond) {
1623 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
1624 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
1625 srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
1626}
1627
1628
1629void Assembler::smmul(Register dst, Register src1, Register src2,
1630 Condition cond) {
1631 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
1632 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
1633 src2.code() * B8 | B4 | src1.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001634}
1635
1636
1637void Assembler::smlal(Register dstL,
1638 Register dstH,
1639 Register src1,
1640 Register src2,
1641 SBit s,
1642 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001643 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1644 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001645 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1646 src2.code()*B8 | B7 | B4 | src1.code());
1647}
1648
1649
1650void Assembler::smull(Register dstL,
1651 Register dstH,
1652 Register src1,
1653 Register src2,
1654 SBit s,
1655 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001656 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1657 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001658 emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1659 src2.code()*B8 | B7 | B4 | src1.code());
1660}
1661
1662
1663void Assembler::umlal(Register dstL,
1664 Register dstH,
1665 Register src1,
1666 Register src2,
1667 SBit s,
1668 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001669 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1670 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001671 emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1672 src2.code()*B8 | B7 | B4 | src1.code());
1673}
1674
1675
1676void Assembler::umull(Register dstL,
1677 Register dstH,
1678 Register src1,
1679 Register src2,
1680 SBit s,
1681 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001682 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
1683 DCHECK(!dstL.is(dstH));
Steve Blocka7e24c12009-10-30 11:49:00 +00001684 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1685 src2.code()*B8 | B7 | B4 | src1.code());
1686}
1687
1688
Andrei Popescu31002712010-02-23 13:46:05 +00001689// Miscellaneous arithmetic instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001690void Assembler::clz(Register dst, Register src, Condition cond) {
1691 // v5 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001692 DCHECK(!dst.is(pc) && !src.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001693 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
Steve Block1e0659c2011-05-24 12:43:12 +01001694 15*B8 | CLZ | src.code());
Steve Blocka7e24c12009-10-30 11:49:00 +00001695}
1696
1697
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001698// Saturating instructions.
1699
1700// Unsigned saturate.
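// For illustration (not part of the original comment): usat(r0, 8, Operand(r1), al)
// clamps the value of r1 into the unsigned range [0, 2^8 - 1] = [0, 255] and
// writes the result to r0.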
1701void Assembler::usat(Register dst,
1702 int satpos,
1703 const Operand& src,
1704 Condition cond) {
1705 // v6 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001706 DCHECK(CpuFeatures::IsSupported(ARMv7));
1707 DCHECK(!dst.is(pc) && !src.rm_.is(pc));
1708 DCHECK((satpos >= 0) && (satpos <= 31));
1709 DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
1710 DCHECK(src.rs_.is(no_reg));
Kristian Monsen50ef84f2010-07-29 15:18:00 +01001711
1712 int sh = 0;
1713 if (src.shift_op_ == ASR) {
1714 sh = 1;
1715 }
1716
1717 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
1718 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
1719}
1720
1721
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001722// Bitfield manipulation instructions.
1723
1724// Unsigned bit field extract.
1725// Extracts #width adjacent bits from position #lsb in a register, and
1726// writes them to the low bits of a destination register.
1727// ubfx dst, src, #lsb, #width
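// For example (illustrative only): ubfx(r0, r1, 8, 4, al) leaves
// r0 == (r1 >> 8) & 0xF.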
1728void Assembler::ubfx(Register dst,
1729 Register src,
1730 int lsb,
1731 int width,
1732 Condition cond) {
1733 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001734 DCHECK(CpuFeatures::IsSupported(ARMv7));
1735 DCHECK(!dst.is(pc) && !src.is(pc));
1736 DCHECK((lsb >= 0) && (lsb <= 31));
1737 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001738 emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
1739 lsb*B7 | B6 | B4 | src.code());
1740}
1741
1742
1743// Signed bit field extract.
1744// Extracts #width adjacent bits from position #lsb in a register, and
1745// writes them to the low bits of a destination register. The extracted
1746// value is sign extended to fill the destination register.
1747// sbfx dst, src, #lsb, #width
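// For example (illustrative only): sbfx(r0, r1, 8, 4, al) extracts bits 11:8
// of r1 into r0 and sign-extends bit 11 through the upper bits.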
1748void Assembler::sbfx(Register dst,
1749 Register src,
1750 int lsb,
1751 int width,
1752 Condition cond) {
1753 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001754 DCHECK(CpuFeatures::IsSupported(ARMv7));
1755 DCHECK(!dst.is(pc) && !src.is(pc));
1756 DCHECK((lsb >= 0) && (lsb <= 31));
1757 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001758 emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
1759 lsb*B7 | B6 | B4 | src.code());
1760}
1761
1762
1763// Bit field clear.
1764// Sets #width adjacent bits at position #lsb in the destination register
1765// to zero, preserving the value of the other bits.
1766// bfc dst, #lsb, #width
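// For example (illustrative only): bfc(r0, 8, 4, al) clears bits 11:8 of r0
// and leaves the remaining bits unchanged.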
1767void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
1768 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001769 DCHECK(CpuFeatures::IsSupported(ARMv7));
1770 DCHECK(!dst.is(pc));
1771 DCHECK((lsb >= 0) && (lsb <= 31));
1772 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001773 int msb = lsb + width - 1;
1774 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
1775}
1776
1777
1778// Bit field insert.
1779// Inserts #width adjacent bits from the low bits of the source register
1780// into position #lsb of the destination register.
1781// bfi dst, src, #lsb, #width
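// For example (illustrative only): bfi(r0, r1, 8, 4, al) replaces bits 11:8 of
// r0 with bits 3:0 of r1, leaving the rest of r0 unchanged.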
1782void Assembler::bfi(Register dst,
1783 Register src,
1784 int lsb,
1785 int width,
1786 Condition cond) {
1787 // v7 and above.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001788 DCHECK(CpuFeatures::IsSupported(ARMv7));
1789 DCHECK(!dst.is(pc) && !src.is(pc));
1790 DCHECK((lsb >= 0) && (lsb <= 31));
1791 DCHECK((width >= 1) && (width <= (32 - lsb)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001792 int msb = lsb + width - 1;
1793 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
1794 src.code());
1795}
1796
1797
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001798void Assembler::pkhbt(Register dst,
1799 Register src1,
1800 const Operand& src2,
1801 Condition cond ) {
1802 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1803 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1804 // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
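 // Semantics, for illustration (paraphrased from the ARM ARM rather than the
 // original comment): dst = (src1 & 0x0000ffff) | ((src2 LSL shift) & 0xffff0000).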
1805 DCHECK(!dst.is(pc));
1806 DCHECK(!src1.is(pc));
1807 DCHECK(!src2.rm().is(pc));
1808 DCHECK(!src2.rm().is(no_reg));
1809 DCHECK(src2.rs().is(no_reg));
1810 DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1811 DCHECK(src2.shift_op() == LSL);
1812 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1813 src2.shift_imm_*B7 | B4 | src2.rm().code());
1814}
1815
1816
1817void Assembler::pkhtb(Register dst,
1818 Register src1,
1819 const Operand& src2,
1820 Condition cond) {
1821 // Instruction details available in ARM DDI 0406C.b, A8.8.125.
1822 // cond(31-28) | 01101000(27-20) | Rn(19-16) |
1823 // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
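 // Semantics, for illustration (paraphrased from the ARM ARM rather than the
 // original comment): dst = (src1 & 0xffff0000) | ((src2 ASR shift) & 0x0000ffff).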
1824 DCHECK(!dst.is(pc));
1825 DCHECK(!src1.is(pc));
1826 DCHECK(!src2.rm().is(pc));
1827 DCHECK(!src2.rm().is(no_reg));
1828 DCHECK(src2.rs().is(no_reg));
1829 DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1830 DCHECK(src2.shift_op() == ASR);
1831 int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1832 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1833 asr*B7 | B6 | B4 | src2.rm().code());
1834}
1835
1836
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001837void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
1838 // Instruction details available in ARM DDI 0406C.b, A8.8.233.
1839 // cond(31-28) | 01101010(27-20) | 1111(19-16) |
1840 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
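 // Semantics, for illustration: the source is rotated right by 'rotate' bits
 // and its low byte is sign-extended, so e.g. sxtb(r0, r1, 8, al) sign-extends
 // bits 15:8 of r1 into r0. The uxt* variants below zero-extend instead.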
1841 DCHECK(!dst.is(pc));
1842 DCHECK(!src.is(pc));
1843 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1844 emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
1845 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1846}
1847
1848
1849void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
1850 Condition cond) {
1851 // Instruction details available in ARM DDI 0406C.b, A8.8.233.
1852 // cond(31-28) | 01101010(27-20) | Rn(19-16) |
1853 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1854 DCHECK(!dst.is(pc));
1855 DCHECK(!src1.is(pc));
1856 DCHECK(!src2.is(pc));
1857 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1858 emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
1859 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1860}
1861
1862
1863void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
1864 // Instruction details available in ARM DDI 0406C.b, A8.8.235.
1865 // cond(31-28) | 01101011(27-20) | 1111(19-16) |
1866 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1867 DCHECK(!dst.is(pc));
1868 DCHECK(!src.is(pc));
1869 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1870 emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
1871 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1872}
1873
1874
1875void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
1876 Condition cond) {
1877 // Instruction details available in ARM DDI 0406C.b, A8.8.235.
1878 // cond(31-28) | 01101011(27-20) | Rn(19-16) |
1879 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1880 DCHECK(!dst.is(pc));
1881 DCHECK(!src1.is(pc));
1882 DCHECK(!src2.is(pc));
1883 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1884 emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
1885 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
1886}
1887
1888
1889void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001890 // Instruction details available in ARM DDI 0406C.b, A8.8.274.
1891 // cond(31-28) | 01101110(27-20) | 1111(19-16) |
1892 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1893 DCHECK(!dst.is(pc));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001894 DCHECK(!src.is(pc));
1895 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1896 emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
1897 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001898}
1899
1900
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001901void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001902 Condition cond) {
1903 // Instruction details available in ARM DDI 0406C.b, A8.8.271.
1904 // cond(31-28) | 01101110(27-20) | Rn(19-16) |
1905 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1906 DCHECK(!dst.is(pc));
1907 DCHECK(!src1.is(pc));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001908 DCHECK(!src2.is(pc));
1909 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1910 emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
1911 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001912}
1913
1914
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001915void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001916 // Instruction details available in ARM DDI 0406C.b, A8.8.275.
1917 // cond(31-28) | 01101100(27-20) | 1111(19-16) |
1918 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1919 DCHECK(!dst.is(pc));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001920 DCHECK(!src.is(pc));
1921 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1922 emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
1923 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1924}
1925
1926
1927void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
1928 // Instruction details available in ARM DDI 0406C.b, A8.8.276.
1929 // cond(31-28) | 01101111(27-20) | 1111(19-16) |
1930 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1931 DCHECK(!dst.is(pc));
1932 DCHECK(!src.is(pc));
1933 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1934 emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
1935 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
1936}
1937
1938
1939void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
1940 Condition cond) {
1941 // Instruction details available in ARM DDI 0406C.b, A8.8.273.
1942 // cond(31-28) | 01101111(27-20) | Rn(19-16) |
1943 // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
1944 DCHECK(!dst.is(pc));
1945 DCHECK(!src1.is(pc));
1946 DCHECK(!src2.is(pc));
1947 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1948 emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
1949 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001950}
1951
1952
Ben Murdoch097c5b22016-05-18 11:27:45 +01001953void Assembler::rbit(Register dst, Register src, Condition cond) {
1954 // Instruction details available in ARM DDI 0406C.b, A8.8.144.
1955 // cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
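 // Semantics, for illustration: dst receives src with its bit order reversed,
 // so bit 0 of src becomes bit 31 of dst, bit 1 becomes bit 30, and so on.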
1956 DCHECK(IsEnabled(ARMv7));
1957 DCHECK(!dst.is(pc));
1958 DCHECK(!src.is(pc));
1959 emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
1960}
1961
1962
Andrei Popescu31002712010-02-23 13:46:05 +00001963// Status register access instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001964void Assembler::mrs(Register dst, SRegister s, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001965 DCHECK(!dst.is(pc));
Steve Blocka7e24c12009-10-30 11:49:00 +00001966 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
1967}
1968
1969
1970void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
1971 Condition cond) {
Ben Murdochda12d292016-06-02 14:46:10 +01001972 DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
1973 DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Steve Blocka7e24c12009-10-30 11:49:00 +00001974 Instr instr;
1975 if (!src.rm_.is_valid()) {
Andrei Popescu31002712010-02-23 13:46:05 +00001976 // Immediate.
Steve Blocka7e24c12009-10-30 11:49:00 +00001977 uint32_t rotate_imm;
1978 uint32_t immed_8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001979 if (src.must_output_reloc_info(this) ||
Steve Blocka7e24c12009-10-30 11:49:00 +00001980 !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
Andrei Popescu31002712010-02-23 13:46:05 +00001981 // Immediate operand cannot be encoded, load it first to register ip.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001982 move_32_bit_immediate(ip, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00001983 msr(fields, Operand(ip), cond);
1984 return;
1985 }
1986 instr = I | rotate_imm*B8 | immed_8;
1987 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001988 DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
Steve Blocka7e24c12009-10-30 11:49:00 +00001989 instr = src.rm_.code();
1990 }
1991 emit(cond | instr | B24 | B21 | fields | 15*B12);
1992}
1993
1994
Andrei Popescu31002712010-02-23 13:46:05 +00001995// Load/Store instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001996void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
1997 if (dst.is(pc)) {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08001998 positions_recorder()->WriteRecordedPositions();
Steve Blocka7e24c12009-10-30 11:49:00 +00001999 }
2000 addrmod2(cond | B26 | L, dst, src);
Steve Blocka7e24c12009-10-30 11:49:00 +00002001}
2002
2003
2004void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
2005 addrmod2(cond | B26, src, dst);
Steve Blocka7e24c12009-10-30 11:49:00 +00002006}
2007
2008
2009void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
2010 addrmod2(cond | B26 | B | L, dst, src);
2011}
2012
2013
2014void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
2015 addrmod2(cond | B26 | B, src, dst);
2016}
2017
2018
2019void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
2020 addrmod3(cond | L | B7 | H | B4, dst, src);
2021}
2022
2023
2024void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
2025 addrmod3(cond | B7 | H | B4, src, dst);
2026}
2027
2028
2029void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
2030 addrmod3(cond | L | B7 | S6 | B4, dst, src);
2031}
2032
2033
2034void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
2035 addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
2036}
2037
2038
Leon Clarkef7060e22010-06-03 12:02:55 +01002039void Assembler::ldrd(Register dst1, Register dst2,
2040 const MemOperand& src, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002041 DCHECK(IsEnabled(ARMv7));
2042 DCHECK(src.rm().is(no_reg));
2043 DCHECK(!dst1.is(lr)); // r14.
2044 DCHECK_EQ(0, dst1.code() % 2);
2045 DCHECK_EQ(dst1.code() + 1, dst2.code());
Leon Clarkef7060e22010-06-03 12:02:55 +01002046 addrmod3(cond | B7 | B6 | B4, dst1, src);
Kristian Monsen25f61362010-05-21 11:50:48 +01002047}
2048
2049
Leon Clarkef7060e22010-06-03 12:02:55 +01002050void Assembler::strd(Register src1, Register src2,
2051 const MemOperand& dst, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002052 DCHECK(dst.rm().is(no_reg));
2053 DCHECK(!src1.is(lr)); // r14.
2054 DCHECK_EQ(0, src1.code() % 2);
2055 DCHECK_EQ(src1.code() + 1, src2.code());
2056 DCHECK(IsEnabled(ARMv7));
Leon Clarkef7060e22010-06-03 12:02:55 +01002057 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
Kristian Monsen25f61362010-05-21 11:50:48 +01002058}
2059
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002060
2061// Preload instructions.
2062void Assembler::pld(const MemOperand& address) {
2063 // Instruction details available in ARM DDI 0406C.b, A8.8.128.
2064 // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
2065 // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
2066 DCHECK(address.rm().is(no_reg));
2067 DCHECK(address.am() == Offset);
2068 int U = B23;
2069 int offset = address.offset();
2070 if (offset < 0) {
2071 offset = -offset;
2072 U = 0;
2073 }
2074 DCHECK(offset < 4096);
2075 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
2076 0xf*B12 | offset);
2077}
2078
2079
Andrei Popescu31002712010-02-23 13:46:05 +00002080// Load/Store multiple instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00002081void Assembler::ldm(BlockAddrMode am,
2082 Register base,
2083 RegList dst,
2084 Condition cond) {
Andrei Popescu31002712010-02-23 13:46:05 +00002085 // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002086 DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002087
2088 addrmod4(cond | B27 | am | L, base, dst);
2089
Andrei Popescu31002712010-02-23 13:46:05 +00002090 // Emit the constant pool after a function return implemented by ldm ..{..pc}.
Steve Blocka7e24c12009-10-30 11:49:00 +00002091 if (cond == al && (dst & pc.bit()) != 0) {
2092 // There is a slight chance that the ldm instruction was actually a call,
2093 // in which case it would be wrong to return into the constant pool; we
2094 // recognize this case by checking if the emission of the pool was blocked
2095 // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
2096 // the case, we emit a jump over the pool.
2097 CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
2098 }
2099}
2100
2101
2102void Assembler::stm(BlockAddrMode am,
2103 Register base,
2104 RegList src,
2105 Condition cond) {
2106 addrmod4(cond | B27 | am, base, src);
2107}
2108
2109
Andrei Popescu31002712010-02-23 13:46:05 +00002110// Exception-generating instructions and debugging support.
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002111// Stops with a non-negative code less than kNumOfWatchedStops support
 2112// enabling/disabling and a counter feature. See simulator-arm.h.
2113void Assembler::stop(const char* msg, Condition cond, int32_t code) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002114#ifndef __arm__
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002115 DCHECK(code >= kDefaultStopCode);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002116 {
2117 // The Simulator will handle the stop instruction and get the message
2118 // address. It expects to find the address just after the svc instruction.
2119 BlockConstPoolScope block_const_pool(this);
2120 if (code >= 0) {
2121 svc(kStopCode + code, cond);
2122 } else {
2123 svc(kStopCode + kMaxStopCode, cond);
2124 }
2125 emit(reinterpret_cast<Instr>(msg));
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002126 }
Andrei Popescu402d9372010-02-26 13:31:12 +00002127#else // def __arm__
Steve Block1e0659c2011-05-24 12:43:12 +01002128 if (cond != al) {
2129 Label skip;
2130 b(&skip, NegateCondition(cond));
2131 bkpt(0);
2132 bind(&skip);
2133 } else {
2134 bkpt(0);
2135 }
Andrei Popescu402d9372010-02-26 13:31:12 +00002136#endif // def __arm__
Steve Blocka7e24c12009-10-30 11:49:00 +00002137}
2138
2139
2140void Assembler::bkpt(uint32_t imm16) { // v5 and above
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002141 DCHECK(is_uint16(imm16));
Steve Block1e0659c2011-05-24 12:43:12 +01002142 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
Steve Blocka7e24c12009-10-30 11:49:00 +00002143}
2144
2145
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -08002146void Assembler::svc(uint32_t imm24, Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002147 DCHECK(is_uint24(imm24));
Steve Blocka7e24c12009-10-30 11:49:00 +00002148 emit(cond | 15*B24 | imm24);
2149}
2150
2151
Ben Murdoch097c5b22016-05-18 11:27:45 +01002152void Assembler::dmb(BarrierOption option) {
2153 emit(kSpecialCondition | 0x57ff*B12 | 5*B4 | option);
2154}
2155
2156
2157void Assembler::dsb(BarrierOption option) {
2158 emit(kSpecialCondition | 0x57ff*B12 | 4*B4 | option);
2159}
2160
2161
2162void Assembler::isb(BarrierOption option) {
2163 emit(kSpecialCondition | 0x57ff*B12 | 6*B4 | option);
2164}
2165
2166
Andrei Popescu31002712010-02-23 13:46:05 +00002167// Coprocessor instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +00002168void Assembler::cdp(Coprocessor coproc,
2169 int opcode_1,
2170 CRegister crd,
2171 CRegister crn,
2172 CRegister crm,
2173 int opcode_2,
2174 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002175 DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
Steve Blocka7e24c12009-10-30 11:49:00 +00002176 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2177 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
2178}
2179
2180
2181void Assembler::cdp2(Coprocessor coproc,
2182 int opcode_1,
2183 CRegister crd,
2184 CRegister crn,
2185 CRegister crm,
2186 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002187 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002188}
2189
2190
2191void Assembler::mcr(Coprocessor coproc,
2192 int opcode_1,
2193 Register rd,
2194 CRegister crn,
2195 CRegister crm,
2196 int opcode_2,
2197 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002198 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
Steve Blocka7e24c12009-10-30 11:49:00 +00002199 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2200 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2201}
2202
2203
2204void Assembler::mcr2(Coprocessor coproc,
2205 int opcode_1,
2206 Register rd,
2207 CRegister crn,
2208 CRegister crm,
2209 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002210 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002211}
2212
2213
2214void Assembler::mrc(Coprocessor coproc,
2215 int opcode_1,
2216 Register rd,
2217 CRegister crn,
2218 CRegister crm,
2219 int opcode_2,
2220 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002221 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
Steve Blocka7e24c12009-10-30 11:49:00 +00002222 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2223 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
2224}
2225
2226
2227void Assembler::mrc2(Coprocessor coproc,
2228 int opcode_1,
2229 Register rd,
2230 CRegister crn,
2231 CRegister crm,
2232 int opcode_2) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002233 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002234}
2235
2236
2237void Assembler::ldc(Coprocessor coproc,
2238 CRegister crd,
2239 const MemOperand& src,
2240 LFlag l,
2241 Condition cond) {
2242 addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
2243}
2244
2245
2246void Assembler::ldc(Coprocessor coproc,
2247 CRegister crd,
2248 Register rn,
2249 int option,
2250 LFlag l,
2251 Condition cond) {
Andrei Popescu31002712010-02-23 13:46:05 +00002252 // Unindexed addressing.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002253 DCHECK(is_uint8(option));
Steve Blocka7e24c12009-10-30 11:49:00 +00002254 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2255 coproc*B8 | (option & 255));
2256}
2257
2258
2259void Assembler::ldc2(Coprocessor coproc,
2260 CRegister crd,
2261 const MemOperand& src,
2262 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002263 ldc(coproc, crd, src, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002264}
2265
2266
2267void Assembler::ldc2(Coprocessor coproc,
2268 CRegister crd,
2269 Register rn,
2270 int option,
2271 LFlag l) { // v5 and above
Steve Block1e0659c2011-05-24 12:43:12 +01002272 ldc(coproc, crd, rn, option, l, kSpecialCondition);
Steve Blocka7e24c12009-10-30 11:49:00 +00002273}
2274
2275
Steve Blockd0582a62009-12-15 09:54:21 +00002276// Support for VFP.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002277
Leon Clarked91b9f72010-01-27 17:25:45 +00002278void Assembler::vldr(const DwVfpRegister dst,
2279 const Register base,
2280 int offset,
2281 const Condition cond) {
2282 // Ddst = MEM(Rbase + offset).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002283 // Instruction details available in ARM DDI 0406C.b, A8-924.
2284 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
2285 // Vd(15-12) | 1011(11-8) | offset
Ben Murdochb0fe1622011-05-05 13:52:32 +01002286 int u = 1;
2287 if (offset < 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002288 CHECK(offset != kMinInt);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002289 offset = -offset;
2290 u = 0;
2291 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002292 int vd, d;
2293 dst.split_code(&vd, &d);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002294
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002295 DCHECK(offset >= 0);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002296 if ((offset % 4) == 0 && (offset / 4) < 256) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002297 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002298 0xB*B8 | ((offset / 4) & 255));
2299 } else {
2300 // Larger offsets must be handled by computing the correct address
2301 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002302 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002303 if (u == 1) {
2304 add(ip, base, Operand(offset));
2305 } else {
2306 sub(ip, base, Operand(offset));
2307 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002308 emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002309 }
2310}
2311
2312
2313void Assembler::vldr(const DwVfpRegister dst,
2314 const MemOperand& operand,
2315 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002316 DCHECK(operand.am_ == Offset);
2317 if (operand.rm().is_valid()) {
2318 add(ip, operand.rn(),
2319 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2320 vldr(dst, ip, 0, cond);
2321 } else {
2322 vldr(dst, operand.rn(), operand.offset(), cond);
2323 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002324}
2325
2326
Steve Block6ded16b2010-05-10 14:33:55 +01002327void Assembler::vldr(const SwVfpRegister dst,
2328 const Register base,
2329 int offset,
2330 const Condition cond) {
2331 // Sdst = MEM(Rbase + offset).
2332 // Instruction details available in ARM DDI 0406A, A8-628.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002333 // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
Steve Block6ded16b2010-05-10 14:33:55 +01002334 // Vdst(15-12) | 1010(11-8) | offset
Ben Murdochb0fe1622011-05-05 13:52:32 +01002335 int u = 1;
2336 if (offset < 0) {
2337 offset = -offset;
2338 u = 0;
2339 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002340 int sd, d;
2341 dst.split_code(&sd, &d);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002342 DCHECK(offset >= 0);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002343
2344 if ((offset % 4) == 0 && (offset / 4) < 256) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01002345 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
Steve Block6ded16b2010-05-10 14:33:55 +01002346 0xA*B8 | ((offset / 4) & 255));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002347 } else {
2348 // Larger offsets must be handled by computing the correct address
2349 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002350 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002351 if (u == 1) {
2352 add(ip, base, Operand(offset));
2353 } else {
2354 sub(ip, base, Operand(offset));
2355 }
2356 emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2357 }
2358}
2359
2360
2361void Assembler::vldr(const SwVfpRegister dst,
2362 const MemOperand& operand,
2363 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002364 DCHECK(operand.am_ == Offset);
2365 if (operand.rm().is_valid()) {
2366 add(ip, operand.rn(),
2367 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2368 vldr(dst, ip, 0, cond);
2369 } else {
2370 vldr(dst, operand.rn(), operand.offset(), cond);
2371 }
Steve Block6ded16b2010-05-10 14:33:55 +01002372}
2373
2374
Leon Clarked91b9f72010-01-27 17:25:45 +00002375void Assembler::vstr(const DwVfpRegister src,
2376 const Register base,
2377 int offset,
2378 const Condition cond) {
2379 // MEM(Rbase + offset) = Dsrc.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002380 // Instruction details available in ARM DDI 0406C.b, A8-1082.
2381 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
2382 // Vd(15-12) | 1011(11-8) | (offset/4)
Ben Murdochb0fe1622011-05-05 13:52:32 +01002383 int u = 1;
2384 if (offset < 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002385 CHECK(offset != kMinInt);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002386 offset = -offset;
2387 u = 0;
2388 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002389 DCHECK(offset >= 0);
2390 int vd, d;
2391 src.split_code(&vd, &d);
2392
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002393 if ((offset % 4) == 0 && (offset / 4) < 256) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002394 emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2395 ((offset / 4) & 255));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002396 } else {
2397 // Larger offsets must be handled by computing the correct address
2398 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002399 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002400 if (u == 1) {
2401 add(ip, base, Operand(offset));
2402 } else {
2403 sub(ip, base, Operand(offset));
2404 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002405 emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002406 }
2407}
2408
2409
2410void Assembler::vstr(const DwVfpRegister src,
2411 const MemOperand& operand,
2412 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002413 DCHECK(operand.am_ == Offset);
2414 if (operand.rm().is_valid()) {
2415 add(ip, operand.rn(),
2416 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2417 vstr(src, ip, 0, cond);
2418 } else {
2419 vstr(src, operand.rn(), operand.offset(), cond);
2420 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002421}
2422
2423
Iain Merrick75681382010-08-19 15:07:18 +01002424void Assembler::vstr(const SwVfpRegister src,
2425 const Register base,
2426 int offset,
2427 const Condition cond) {
2428 // MEM(Rbase + offset) = SSrc.
2429 // Instruction details available in ARM DDI 0406A, A8-786.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002430 // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
Iain Merrick75681382010-08-19 15:07:18 +01002431 // Vdst(15-12) | 1010(11-8) | (offset/4)
Ben Murdochb0fe1622011-05-05 13:52:32 +01002432 int u = 1;
2433 if (offset < 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002434 CHECK(offset != kMinInt);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002435 offset = -offset;
2436 u = 0;
2437 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002438 int sd, d;
2439 src.split_code(&sd, &d);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002440 DCHECK(offset >= 0);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002441 if ((offset % 4) == 0 && (offset / 4) < 256) {
2442 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2443 0xA*B8 | ((offset / 4) & 255));
2444 } else {
2445 // Larger offsets must be handled by computing the correct address
2446 // in the ip register.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002447 DCHECK(!base.is(ip));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002448 if (u == 1) {
2449 add(ip, base, Operand(offset));
2450 } else {
2451 sub(ip, base, Operand(offset));
2452 }
2453 emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
2454 }
2455}
2456
2457
2458void Assembler::vstr(const SwVfpRegister src,
2459 const MemOperand& operand,
2460 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002461 DCHECK(operand.am_ == Offset);
2462 if (operand.rm().is_valid()) {
2463 add(ip, operand.rn(),
2464 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2465 vstr(src, ip, 0, cond);
2466 } else {
2467 vstr(src, operand.rn(), operand.offset(), cond);
2468 }
Iain Merrick75681382010-08-19 15:07:18 +01002469}
2470
2471
Ben Murdoch8b112d22011-06-08 16:22:53 +01002472void Assembler::vldm(BlockAddrMode am,
2473 Register base,
2474 DwVfpRegister first,
2475 DwVfpRegister last,
2476 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002477 // Instruction details available in ARM DDI 0406C.b, A8-922.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002478 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002479 // first(15-12) | 1011(11-8) | (count * 2)
2480 DCHECK_LE(first.code(), last.code());
2481 DCHECK(am == ia || am == ia_w || am == db_w);
2482 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002483
2484 int sd, d;
2485 first.split_code(&sd, &d);
2486 int count = last.code() - first.code() + 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002487 DCHECK(count <= 16);
Ben Murdoch8b112d22011-06-08 16:22:53 +01002488 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2489 0xB*B8 | count*2);
2490}
2491
2492
2493void Assembler::vstm(BlockAddrMode am,
2494 Register base,
2495 DwVfpRegister first,
2496 DwVfpRegister last,
2497 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002498 // Instruction details available in ARM DDI 0406C.b, A8-1080.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002499 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2500 // first(15-12) | 1011(11-8) | (count * 2)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002501 DCHECK_LE(first.code(), last.code());
2502 DCHECK(am == ia || am == ia_w || am == db_w);
2503 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002504
2505 int sd, d;
2506 first.split_code(&sd, &d);
2507 int count = last.code() - first.code() + 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002508 DCHECK(count <= 16);
Ben Murdoch8b112d22011-06-08 16:22:53 +01002509 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2510 0xB*B8 | count*2);
2511}
2512
2513void Assembler::vldm(BlockAddrMode am,
2514 Register base,
2515 SwVfpRegister first,
2516 SwVfpRegister last,
2517 Condition cond) {
2518 // Instruction details available in ARM DDI 0406A, A8-626.
2519 // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
2520 // first(15-12) | 1010(11-8) | (count/2)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002521 DCHECK_LE(first.code(), last.code());
2522 DCHECK(am == ia || am == ia_w || am == db_w);
2523 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002524
2525 int sd, d;
2526 first.split_code(&sd, &d);
2527 int count = last.code() - first.code() + 1;
2528 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2529 0xA*B8 | count);
2530}
2531
2532
2533void Assembler::vstm(BlockAddrMode am,
2534 Register base,
2535 SwVfpRegister first,
2536 SwVfpRegister last,
2537 Condition cond) {
2538 // Instruction details available in ARM DDI 0406A, A8-784.
2539 // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
2540 // first(15-12) | 1011(11-8) | (count/2)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002541 DCHECK_LE(first.code(), last.code());
2542 DCHECK(am == ia || am == ia_w || am == db_w);
2543 DCHECK(!base.is(pc));
Ben Murdoch8b112d22011-06-08 16:22:53 +01002544
2545 int sd, d;
2546 first.split_code(&sd, &d);
2547 int count = last.code() - first.code() + 1;
2548 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
2549 0xA*B8 | count);
2550}
2551
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002552
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002553static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
2554 uint64_t i;
2555 memcpy(&i, &d, 8);
2556
2557 *lo = i & 0xffffffff;
2558 *hi = i >> 32;
2559}
2560
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002561
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002562// Only works for little-endian floating-point formats.
 2563// We don't support VFP on mixed-endian floating-point platforms.
Ben Murdochda12d292016-06-02 14:46:10 +01002564static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002565 DCHECK(CpuFeatures::IsSupported(VFP3));
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002566
2567 // VMOV can accept an immediate of the form:
2568 //
2569 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
2570 //
2571 // The immediate is encoded using an 8-bit quantity, comprised of two
2572 // 4-bit fields. For an 8-bit immediate of the form:
2573 //
2574 // [abcdefgh]
2575 //
2576 // where a is the MSB and h is the LSB, an immediate 64-bit double can be
2577 // created of the form:
2578 //
2579 // [aBbbbbbb,bbcdefgh,00000000,00000000,
2580 // 00000000,00000000,00000000,00000000]
2581 //
2582 // where B = ~b.
2583 //
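  // Worked example (illustrative, not taken from the ARM manual): for
  // d = 1.0 the bit pattern is 0x3FF0000000000000, so hi = 0x3FF00000 and
  // lo = 0. The corresponding imm8 is abcdefgh = 01110000 (0x70), and the
  // encoding produced below is 0x00070000 (imm4H = 0x7 in bits 19-16,
  // imm4L = 0x0 in bits 3-0).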
2584
2585 uint32_t lo, hi;
2586 DoubleAsTwoUInt32(d, &lo, &hi);
2587
2588 // The most obvious constraint is the long block of zeroes.
2589 if ((lo != 0) || ((hi & 0xffff) != 0)) {
2590 return false;
2591 }
2592
Ben Murdochda12d292016-06-02 14:46:10 +01002593 // Bits 61:54 must be all clear or all set.
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002594 if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
2595 return false;
2596 }
2597
Ben Murdochda12d292016-06-02 14:46:10 +01002598 // Bit 62 must be NOT bit 61.
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002599 if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
2600 return false;
2601 }
2602
2603 // Create the encoded immediate in the form:
2604 // [00000000,0000abcd,00000000,0000efgh]
2605 *encoding = (hi >> 16) & 0xf; // Low nybble.
2606 *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
2607 *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
2608
2609 return true;
2610}
2611
2612
Ben Murdochda12d292016-06-02 14:46:10 +01002613void Assembler::vmov(const SwVfpRegister dst, float imm) {
2614 uint32_t enc;
2615 if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
2616 // The float can be encoded in the instruction.
2617 //
2618 // Sd = immediate
2619 // Instruction details available in ARM DDI 0406C.b, A8-936.
2620 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2621 // Vd(15-12) | 101(11-9) | sz=0(8) | imm4L(3-0)
2622 int vd, d;
2623 dst.split_code(&vd, &d);
2624 emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
2625 } else {
2626 mov(ip, Operand(bit_cast<int32_t>(imm)));
2627 vmov(dst, ip);
2628 }
2629}
2630
2631
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002632void Assembler::vmov(const DwVfpRegister dst,
2633 double imm,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002634 const Register scratch) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002635 uint32_t enc;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002636 // If the embedded constant pool is disabled, we can use the normal, inline
2637 // constant pool. If the embedded constant pool is enabled (via
2638 // FLAG_enable_embedded_constant_pool), we can only use it where the pool
2639 // pointer (pp) is valid.
2640 bool can_use_pool =
2641 !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
Ben Murdochda12d292016-06-02 14:46:10 +01002642 if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002643 // The double can be encoded in the instruction.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002644 //
2645 // Dd = immediate
2646 // Instruction details available in ARM DDI 0406C.b, A8-936.
2647 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
2648 // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
2649 int vd, d;
2650 dst.split_code(&vd, &d);
2651 emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002652 } else if (FLAG_enable_vldr_imm && can_use_pool) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002653 // TODO(jfb) Temporarily turned off until we have constant blinding or
2654 // some equivalent mitigation: an attacker can otherwise control
2655 // generated data which also happens to be executable, a Very Bad
2656 // Thing indeed.
2657 // Blinding gets tricky because we don't have xor, we probably
2658 // need to add/subtract without losing precision, which requires a
2659 // cookie value that Lithium is probably better positioned to
2660 // choose.
2661 // We could also add a few peepholes here like detecting 0.0 and
2662 // -0.0 and doing a vmov from the sequestered d14, forcing denorms
2663 // to zero (we set flush-to-zero), and normalizing NaN values.
2664 // We could also detect redundant values.
2665 // The code could also randomize the order of values, though
2666 // that's tricky because vldr has a limited reach. Furthermore
2667 // it breaks load locality.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002668 ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
2669 if (access == ConstantPoolEntry::OVERFLOWED) {
2670 DCHECK(FLAG_enable_embedded_constant_pool);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002671 // Emit instructions to load constant pool offset.
2672 movw(ip, 0);
2673 movt(ip, 0);
2674 // Load from constant pool at offset.
2675 vldr(dst, MemOperand(pp, ip));
2676 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002677 DCHECK(access == ConstantPoolEntry::REGULAR);
2678 vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002679 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002680 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002681 // Synthesise the double from ARM immediates.
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002682 uint32_t lo, hi;
2683 DoubleAsTwoUInt32(imm, &lo, &hi);
2684
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002685 if (lo == hi) {
2686 // Move the low and high parts of the double to a D register in one
2687 // instruction.
2688 mov(ip, Operand(lo));
2689 vmov(dst, ip, ip);
2690 } else if (scratch.is(no_reg)) {
2691 mov(ip, Operand(lo));
2692 vmov(dst, VmovIndexLo, ip);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002693 if (((lo & 0xffff) == (hi & 0xffff)) &&
2694 CpuFeatures::IsSupported(ARMv7)) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002695 movt(ip, hi >> 16);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002696 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002697 mov(ip, Operand(hi));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002698 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002699 vmov(dst, VmovIndexHi, ip);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002700 } else {
2701 // Move the low and high parts of the double to a D register in one
2702 // instruction.
2703 mov(ip, Operand(lo));
2704 mov(scratch, Operand(hi));
2705 vmov(dst, ip, scratch);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002706 }
2707 }
2708}
2709
2710
2711void Assembler::vmov(const SwVfpRegister dst,
2712 const SwVfpRegister src,
2713 const Condition cond) {
2714 // Sd = Sm
2715 // Instruction details available in ARM DDI 0406B, A8-642.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002716 int sd, d, sm, m;
2717 dst.split_code(&sd, &d);
2718 src.split_code(&sm, &m);
2719 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002720}
2721
2722
Leon Clarkee46be812010-01-19 14:06:41 +00002723void Assembler::vmov(const DwVfpRegister dst,
Steve Block8defd9f2010-07-08 12:39:36 +01002724 const DwVfpRegister src,
2725 const Condition cond) {
2726 // Dd = Dm
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002727 // Instruction details available in ARM DDI 0406C.b, A8-938.
2728 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
2729 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
2730 int vd, d;
2731 dst.split_code(&vd, &d);
2732 int vm, m;
2733 src.split_code(&vm, &m);
2734 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
2735 vm);
2736}
2737
2738
2739void Assembler::vmov(const DwVfpRegister dst,
2740 const VmovIndex index,
2741 const Register src,
2742 const Condition cond) {
2743 // Dd[index] = Rt
2744 // Instruction details available in ARM DDI 0406C.b, A8-940.
2745 // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
2746 // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2747 DCHECK(index.index == 0 || index.index == 1);
2748 int vd, d;
2749 dst.split_code(&vd, &d);
2750 emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
2751 d*B7 | B4);
2752}
2753
2754
2755void Assembler::vmov(const Register dst,
2756 const VmovIndex index,
2757 const DwVfpRegister src,
2758 const Condition cond) {
2759  // Rt = Dd[index]
2760 // Instruction details available in ARM DDI 0406C.b, A8.8.342.
2761 // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
2762 // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
2763 DCHECK(index.index == 0 || index.index == 1);
2764 int vn, n;
2765 src.split_code(&vn, &n);
2766 emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
2767 0xB*B8 | n*B7 | B4);
Steve Block8defd9f2010-07-08 12:39:36 +01002768}
2769
2770
2771void Assembler::vmov(const DwVfpRegister dst,
Leon Clarkee46be812010-01-19 14:06:41 +00002772 const Register src1,
2773 const Register src2,
2774 const Condition cond) {
Steve Blockd0582a62009-12-15 09:54:21 +00002775 // Dm = <Rt,Rt2>.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002776 // Instruction details available in ARM DDI 0406C.b, A8-948.
Steve Blockd0582a62009-12-15 09:54:21 +00002777 // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
2778 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002779 DCHECK(!src1.is(pc) && !src2.is(pc));
2780 int vm, m;
2781 dst.split_code(&vm, &m);
Steve Blockd0582a62009-12-15 09:54:21 +00002782 emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002783 src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00002784}
2785
2786
Leon Clarkee46be812010-01-19 14:06:41 +00002787void Assembler::vmov(const Register dst1,
2788 const Register dst2,
2789 const DwVfpRegister src,
2790 const Condition cond) {
Steve Blockd0582a62009-12-15 09:54:21 +00002791 // <Rt,Rt2> = Dm.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002792 // Instruction details available in ARM DDI 0406C.b, A8-948.
Steve Blockd0582a62009-12-15 09:54:21 +00002793 // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
2794 // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002795 DCHECK(!dst1.is(pc) && !dst2.is(pc));
2796 int vm, m;
2797 src.split_code(&vm, &m);
Steve Blockd0582a62009-12-15 09:54:21 +00002798 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002799 dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00002800}
2801
2802
Leon Clarkee46be812010-01-19 14:06:41 +00002803void Assembler::vmov(const SwVfpRegister dst,
Steve Blockd0582a62009-12-15 09:54:21 +00002804 const Register src,
Steve Blockd0582a62009-12-15 09:54:21 +00002805 const Condition cond) {
2806 // Sn = Rt.
2807 // Instruction details available in ARM DDI 0406A, A8-642.
2808 // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
2809 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002810 DCHECK(!src.is(pc));
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002811 int sn, n;
2812 dst.split_code(&sn, &n);
2813 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
Steve Blockd0582a62009-12-15 09:54:21 +00002814}
2815
2816
Leon Clarkee46be812010-01-19 14:06:41 +00002817void Assembler::vmov(const Register dst,
2818 const SwVfpRegister src,
Steve Blockd0582a62009-12-15 09:54:21 +00002819 const Condition cond) {
2820 // Rt = Sn.
2821 // Instruction details available in ARM DDI 0406A, A8-642.
2822 // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
2823 // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002824 DCHECK(!dst.is(pc));
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002825 int sn, n;
2826 src.split_code(&sn, &n);
2827 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
Steve Blockd0582a62009-12-15 09:54:21 +00002828}
2829
2830
Steve Block6ded16b2010-05-10 14:33:55 +01002831// Type of data to read from or write to VFP register.
2832// Used as specifier in generic vcvt instruction.
2833enum VFPType { S32, U32, F32, F64 };
2834
2835
2836static bool IsSignedVFPType(VFPType type) {
2837 switch (type) {
2838 case S32:
2839 return true;
2840 case U32:
2841 return false;
2842 default:
2843 UNREACHABLE();
2844 return false;
2845 }
Steve Blockd0582a62009-12-15 09:54:21 +00002846}
2847
2848
Steve Block6ded16b2010-05-10 14:33:55 +01002849static bool IsIntegerVFPType(VFPType type) {
2850 switch (type) {
2851 case S32:
2852 case U32:
2853 return true;
2854 case F32:
2855 case F64:
2856 return false;
2857 default:
2858 UNREACHABLE();
2859 return false;
2860 }
2861}
2862
2863
2864static bool IsDoubleVFPType(VFPType type) {
2865 switch (type) {
2866 case F32:
2867 return false;
2868 case F64:
2869 return true;
2870 default:
2871 UNREACHABLE();
2872 return false;
2873 }
2874}
2875
2876
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002877// Split five bit reg_code based on size of reg_type.
2878// 32-bit register codes are Vm:M
2879// 64-bit register codes are M:Vm
2880// where Vm is four bits, and M is a single bit.
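// For example (illustrative): a 32-bit code of 5 (s5) splits into vm = 2 and
// m = 1, while a 64-bit code of 17 (d17) splits into vm = 1 and m = 1.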
2881static void SplitRegCode(VFPType reg_type,
Steve Block6ded16b2010-05-10 14:33:55 +01002882 int reg_code,
2883 int* vm,
2884 int* m) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002885 DCHECK((reg_code >= 0) && (reg_code <= 31));
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002886 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
2887 // 32 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002888 *m = reg_code & 0x1;
2889 *vm = reg_code >> 1;
2890 } else {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002891 // 64 bit type.
Steve Block6ded16b2010-05-10 14:33:55 +01002892 *m = (reg_code & 0x10) >> 4;
2893 *vm = reg_code & 0x0F;
2894 }
2895}
2896
2897
2898// Encode vcvt.src_type.dst_type instruction.
2899static Instr EncodeVCVT(const VFPType dst_type,
2900 const int dst_code,
2901 const VFPType src_type,
2902 const int src_code,
Steve Block1e0659c2011-05-24 12:43:12 +01002903 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002904 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002905 DCHECK(src_type != dst_type);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002906 int D, Vd, M, Vm;
2907 SplitRegCode(src_type, src_code, &Vm, &M);
2908 SplitRegCode(dst_type, dst_code, &Vd, &D);
2909
Steve Block6ded16b2010-05-10 14:33:55 +01002910 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
2911 // Conversion between IEEE floating point and 32-bit integer.
2912 // Instruction details available in ARM DDI 0406B, A8.6.295.
2913 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
2914 // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002915 DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
Steve Block6ded16b2010-05-10 14:33:55 +01002916
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002917 int sz, opc2, op;
Steve Block6ded16b2010-05-10 14:33:55 +01002918
2919 if (IsIntegerVFPType(dst_type)) {
2920 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
2921 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
Russell Brenner90bac252010-11-18 13:33:46 -08002922 op = mode;
Steve Block6ded16b2010-05-10 14:33:55 +01002923 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002924 DCHECK(IsIntegerVFPType(src_type));
Steve Block6ded16b2010-05-10 14:33:55 +01002925 opc2 = 0x0;
2926 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
2927 op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
Steve Block6ded16b2010-05-10 14:33:55 +01002928 }
2929
2930 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
2931 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
2932 } else {
2933 // Conversion between IEEE double and single precision.
2934 // Instruction details available in ARM DDI 0406B, A8.6.298.
2935 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
2936 // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002937 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
Steve Block6ded16b2010-05-10 14:33:55 +01002938 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
2939 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
2940 }
2941}
2942
2943
2944void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
2945 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002946 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002947 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002948 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002949}
2950
2951
2952void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
2953 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002954 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002955 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002956 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002957}
2958
2959
2960void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
2961 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002962 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002963 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002964 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002965}
2966
2967
Ben Murdoch097c5b22016-05-18 11:27:45 +01002968void Assembler::vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
2969 VFPConversionMode mode, const Condition cond) {
2970 emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
2971}
2972
2973
2974void Assembler::vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
2975 VFPConversionMode mode, const Condition cond) {
2976 emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
2977}
2978
2979
2980void Assembler::vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
2981 VFPConversionMode mode, const Condition cond) {
2982 emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
2983}
2984
2985
Steve Block6ded16b2010-05-10 14:33:55 +01002986void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
2987 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002988 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002989 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002990 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002991}
2992
2993
2994void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
2995 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01002996 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01002997 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08002998 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01002999}
3000
3001
3002void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
3003 const SwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01003004 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01003005 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08003006 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
Steve Block6ded16b2010-05-10 14:33:55 +01003007}
3008
3009
3010void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
3011 const DwVfpRegister src,
Steve Block1e0659c2011-05-24 12:43:12 +01003012 VFPConversionMode mode,
Steve Block6ded16b2010-05-10 14:33:55 +01003013 const Condition cond) {
Russell Brenner90bac252010-11-18 13:33:46 -08003014 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
Steve Blockd0582a62009-12-15 09:54:21 +00003015}
3016
3017
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003018void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
3019 int fraction_bits,
3020 const Condition cond) {
3021 // Instruction details available in ARM DDI 0406C.b, A8-874.
3022 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
3023 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
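  // For example (illustrative): fraction_bits = 16 gives imm5 = 16, so the
  // computation below yields i = 0 and imm4 = 8.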
3024 DCHECK(fraction_bits > 0 && fraction_bits <= 32);
3025 DCHECK(CpuFeatures::IsSupported(VFP3));
3026 int vd, d;
3027 dst.split_code(&vd, &d);
3028 int imm5 = 32 - fraction_bits;
3029 int i = imm5 & 1;
3030 int imm4 = (imm5 >> 1) & 0xf;
3031 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
3032 vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
3033}
3034
3035
Steve Block44f0eee2011-05-26 01:26:41 +01003036void Assembler::vneg(const DwVfpRegister dst,
3037 const DwVfpRegister src,
3038 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003039 // Instruction details available in ARM DDI 0406C.b, A8-968.
3040 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
3041 // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3042 int vd, d;
3043 dst.split_code(&vd, &d);
3044 int vm, m;
3045 src.split_code(&vm, &m);
3046
3047 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
3048 m*B5 | vm);
Steve Block44f0eee2011-05-26 01:26:41 +01003049}
3050
3051
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003052void Assembler::vneg(const SwVfpRegister dst, const SwVfpRegister src,
3053 const Condition cond) {
3054 // Instruction details available in ARM DDI 0406C.b, A8-968.
3055 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
3056 // 101(11-9) | sz=0(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3057 int vd, d;
3058 dst.split_code(&vd, &d);
3059 int vm, m;
3060 src.split_code(&vm, &m);
3061
3062 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
3063 B6 | m * B5 | vm);
3064}
3065
3066
Steve Block1e0659c2011-05-24 12:43:12 +01003067void Assembler::vabs(const DwVfpRegister dst,
3068 const DwVfpRegister src,
3069 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003070 // Instruction details available in ARM DDI 0406C.b, A8-524.
3071 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
3072 // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3073 int vd, d;
3074 dst.split_code(&vd, &d);
3075 int vm, m;
3076 src.split_code(&vm, &m);
3077 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
3078 m*B5 | vm);
Steve Block1e0659c2011-05-24 12:43:12 +01003079}
3080
3081
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003082void Assembler::vabs(const SwVfpRegister dst, const SwVfpRegister src,
3083 const Condition cond) {
3084 // Instruction details available in ARM DDI 0406C.b, A8-524.
3085 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
3086 // 101(11-9) | sz=0(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3087 int vd, d;
3088 dst.split_code(&vd, &d);
3089 int vm, m;
3090 src.split_code(&vm, &m);
3091 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B7 | B6 |
3092 m * B5 | vm);
3093}
3094
3095
Leon Clarkee46be812010-01-19 14:06:41 +00003096void Assembler::vadd(const DwVfpRegister dst,
3097 const DwVfpRegister src1,
3098 const DwVfpRegister src2,
3099 const Condition cond) {
3100 // Dd = vadd(Dn, Dm) double precision floating point addition.
Steve Blockd0582a62009-12-15 09:54:21 +00003101 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003102 // Instruction details available in ARM DDI 0406C.b, A8-830.
3103 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
3104 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3105 int vd, d;
3106 dst.split_code(&vd, &d);
3107 int vn, n;
3108 src1.split_code(&vn, &n);
3109 int vm, m;
3110 src2.split_code(&vm, &m);
3111 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3112 n*B7 | m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003113}
3114
3115
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003116void Assembler::vadd(const SwVfpRegister dst, const SwVfpRegister src1,
3117 const SwVfpRegister src2, const Condition cond) {
3118 // Sd = vadd(Sn, Sm) single precision floating point addition.
3119 // Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
3120 // Instruction details available in ARM DDI 0406C.b, A8-830.
3121 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
3122 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3123 int vd, d;
3124 dst.split_code(&vd, &d);
3125 int vn, n;
3126 src1.split_code(&vn, &n);
3127 int vm, m;
3128 src2.split_code(&vm, &m);
3129 emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
3130 0x5 * B9 | n * B7 | m * B5 | vm);
3131}
3132
3133
Leon Clarkee46be812010-01-19 14:06:41 +00003134void Assembler::vsub(const DwVfpRegister dst,
3135 const DwVfpRegister src1,
3136 const DwVfpRegister src2,
3137 const Condition cond) {
3138 // Dd = vsub(Dn, Dm) double precision floating point subtraction.
Steve Blockd0582a62009-12-15 09:54:21 +00003139 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003140 // Instruction details available in ARM DDI 0406C.b, A8-1086.
3141 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
3142 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3143 int vd, d;
3144 dst.split_code(&vd, &d);
3145 int vn, n;
3146 src1.split_code(&vn, &n);
3147 int vm, m;
3148 src2.split_code(&vm, &m);
3149 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3150 n*B7 | B6 | m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003151}
3152
3153
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003154void Assembler::vsub(const SwVfpRegister dst, const SwVfpRegister src1,
3155 const SwVfpRegister src2, const Condition cond) {
3156 // Sd = vsub(Sn, Sm) single precision floating point subtraction.
3157 // Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
3158 // Instruction details available in ARM DDI 0406C.b, A8-1086.
3159 // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
3160 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3161 int vd, d;
3162 dst.split_code(&vd, &d);
3163 int vn, n;
3164 src1.split_code(&vn, &n);
3165 int vm, m;
3166 src2.split_code(&vm, &m);
3167 emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
3168 0x5 * B9 | n * B7 | B6 | m * B5 | vm);
3169}
3170
3171
Leon Clarkee46be812010-01-19 14:06:41 +00003172void Assembler::vmul(const DwVfpRegister dst,
3173 const DwVfpRegister src1,
3174 const DwVfpRegister src2,
3175 const Condition cond) {
3176 // Dd = vmul(Dn, Dm) double precision floating point multiplication.
Steve Blockd0582a62009-12-15 09:54:21 +00003177 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003178 // Instruction details available in ARM DDI 0406C.b, A8-960.
3179 // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
3180 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3181 int vd, d;
3182 dst.split_code(&vd, &d);
3183 int vn, n;
3184 src1.split_code(&vn, &n);
3185 int vm, m;
3186 src2.split_code(&vm, &m);
3187 emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3188 n*B7 | m*B5 | vm);
3189}
3190
3191
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003192void Assembler::vmul(const SwVfpRegister dst, const SwVfpRegister src1,
3193 const SwVfpRegister src2, const Condition cond) {
3194 // Sd = vmul(Sn, Sm) single precision floating point multiplication.
3195 // Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
3196 // Instruction details available in ARM DDI 0406C.b, A8-960.
3197 // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
3198 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3199 int vd, d;
3200 dst.split_code(&vd, &d);
3201 int vn, n;
3202 src1.split_code(&vn, &n);
3203 int vm, m;
3204 src2.split_code(&vm, &m);
3205 emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
3206 0x5 * B9 | n * B7 | m * B5 | vm);
3207}
3208
3209
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003210void Assembler::vmla(const DwVfpRegister dst,
3211 const DwVfpRegister src1,
3212 const DwVfpRegister src2,
3213 const Condition cond) {
3214 // Instruction details available in ARM DDI 0406C.b, A8-932.
3215 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3216 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
3217 int vd, d;
3218 dst.split_code(&vd, &d);
3219 int vn, n;
3220 src1.split_code(&vn, &n);
3221 int vm, m;
3222 src2.split_code(&vm, &m);
3223 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
3224 vm);
3225}
3226
3227
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003228void Assembler::vmla(const SwVfpRegister dst, const SwVfpRegister src1,
3229 const SwVfpRegister src2, const Condition cond) {
3230 // Instruction details available in ARM DDI 0406C.b, A8-932.
3231 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3232 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
3233 int vd, d;
3234 dst.split_code(&vd, &d);
3235 int vn, n;
3236 src1.split_code(&vn, &n);
3237 int vm, m;
3238 src2.split_code(&vm, &m);
3239 emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
3240 m * B5 | vm);
3241}
3242
3243
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003244void Assembler::vmls(const DwVfpRegister dst,
3245 const DwVfpRegister src1,
3246 const DwVfpRegister src2,
3247 const Condition cond) {
3248 // Instruction details available in ARM DDI 0406C.b, A8-932.
3249 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3250 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
3251 int vd, d;
3252 dst.split_code(&vd, &d);
3253 int vn, n;
3254 src1.split_code(&vn, &n);
3255 int vm, m;
3256 src2.split_code(&vm, &m);
3257 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
3258 m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003259}
3260
3261
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003262void Assembler::vmls(const SwVfpRegister dst, const SwVfpRegister src1,
3263 const SwVfpRegister src2, const Condition cond) {
3264 // Instruction details available in ARM DDI 0406C.b, A8-932.
3265 // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
3266 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
3267 int vd, d;
3268 dst.split_code(&vd, &d);
3269 int vn, n;
3270 src1.split_code(&vn, &n);
3271 int vm, m;
3272 src2.split_code(&vm, &m);
3273 emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
3274 B6 | m * B5 | vm);
3275}
3276
3277
Leon Clarkee46be812010-01-19 14:06:41 +00003278void Assembler::vdiv(const DwVfpRegister dst,
3279 const DwVfpRegister src1,
3280 const DwVfpRegister src2,
3281 const Condition cond) {
3282 // Dd = vdiv(Dn, Dm) double precision floating point division.
Steve Blockd0582a62009-12-15 09:54:21 +00003283 // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003284 // Instruction details available in ARM DDI 0406C.b, A8-882.
3285 // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
3286 // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3287 int vd, d;
3288 dst.split_code(&vd, &d);
3289 int vn, n;
3290 src1.split_code(&vn, &n);
3291 int vm, m;
3292 src2.split_code(&vm, &m);
3293 emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
3294 vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003295}
3296
3297
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003298void Assembler::vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
3299 const SwVfpRegister src2, const Condition cond) {
3300 // Sd = vdiv(Sn, Sm) single precision floating point division.
3301 // Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
3302 // Instruction details available in ARM DDI 0406C.b, A8-882.
3303 // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
3304 // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
3305 int vd, d;
3306 dst.split_code(&vd, &d);
3307 int vn, n;
3308 src1.split_code(&vn, &n);
3309 int vm, m;
3310 src2.split_code(&vm, &m);
3311 emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
3312 m * B5 | vm);
3313}
3314
3315
Leon Clarkee46be812010-01-19 14:06:41 +00003316void Assembler::vcmp(const DwVfpRegister src1,
3317 const DwVfpRegister src2,
Steve Blockd0582a62009-12-15 09:54:21 +00003318 const Condition cond) {
3319 // vcmp(Dd, Dm) double precision floating point comparison.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003320 // Instruction details available in ARM DDI 0406C.b, A8-864.
3321 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
3322 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3323 int vd, d;
3324 src1.split_code(&vd, &d);
3325 int vm, m;
3326 src2.split_code(&vm, &m);
3327 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
3328 m*B5 | vm);
Steve Blockd0582a62009-12-15 09:54:21 +00003329}
3330
3331
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003332void Assembler::vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
3333 const Condition cond) {
3334 // vcmp(Sd, Sm) single precision floating point comparison.
3335 // Instruction details available in ARM DDI 0406C.b, A8-864.
3336 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
3337 // Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3338 int vd, d;
3339 src1.split_code(&vd, &d);
3340 int vm, m;
3341 src2.split_code(&vm, &m);
3342 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
3343 0x5 * B9 | B6 | m * B5 | vm);
3344}
3345
3346
Iain Merrick75681382010-08-19 15:07:18 +01003347void Assembler::vcmp(const DwVfpRegister src1,
3348 const double src2,
Iain Merrick75681382010-08-19 15:07:18 +01003349 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003350 // vcmp(Dd, #0.0) double precision floating point comparison.
3351 // Instruction details available in ARM DDI 0406C.b, A8-864.
3352 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
3353 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
3354 DCHECK(src2 == 0.0);
3355 int vd, d;
3356 src1.split_code(&vd, &d);
3357 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
Iain Merrick75681382010-08-19 15:07:18 +01003358}
3359
3360
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003361void Assembler::vcmp(const SwVfpRegister src1, const float src2,
3362 const Condition cond) {
3363 // vcmp(Sd, #0.0) single precision floating point comparison.
3364 // Instruction details available in ARM DDI 0406C.b, A8-864.
3365 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
3366 // Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
3367 DCHECK(src2 == 0.0);
3368 int vd, d;
3369 src1.split_code(&vd, &d);
3370 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
3371 0x5 * B9 | B6);
Steve Blockd0582a62009-12-15 09:54:21 +00003372}
3373
3374
Steve Block8defd9f2010-07-08 12:39:36 +01003375void Assembler::vsqrt(const DwVfpRegister dst,
3376 const DwVfpRegister src,
3377 const Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003378 // Instruction details available in ARM DDI 0406C.b, A8-1058.
3379 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
3380 // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
3381 int vd, d;
3382 dst.split_code(&vd, &d);
3383 int vm, m;
3384 src.split_code(&vm, &m);
3385 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
3386 m*B5 | vm);
3387}
3388
3389
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003390void Assembler::vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
3391 const Condition cond) {
3392 // Instruction details available in ARM DDI 0406C.b, A8-1058.
3393 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
3394 // Vd(15-12) | 101(11-9) | sz=0(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
3395 int vd, d;
3396 dst.split_code(&vd, &d);
3397 int vm, m;
3398 src.split_code(&vm, &m);
3399 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
3400 0x3 * B6 | m * B5 | vm);
3401}
3402
3403
3404void Assembler::vmsr(Register dst, Condition cond) {
3405 // Instruction details available in ARM DDI 0406A, A8-652.
3406 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
3407 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3408 emit(cond | 0xE * B24 | 0xE * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
3409}
3410
3411
3412void Assembler::vmrs(Register dst, Condition cond) {
3413 // Instruction details available in ARM DDI 0406A, A8-652.
3414 // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
3415 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
3416 emit(cond | 0xE * B24 | 0xF * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
3417}
3418
3419
3420void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
3421 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3422 // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
3423 // M(5) | 0(4) | Vm(3-0)
3424 DCHECK(CpuFeatures::IsSupported(ARMv8));
3425 int vd, d;
3426 dst.split_code(&vd, &d);
3427 int vm, m;
3428 src.split_code(&vm, &m);
3429 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
3430 0x5 * B9 | B6 | m * B5 | vm);
3431}
3432
3433
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003434void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
3435 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3436 // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3437 // M(5) | 0(4) | Vm(3-0)
3438 DCHECK(CpuFeatures::IsSupported(ARMv8));
3439 int vd, d;
3440 dst.split_code(&vd, &d);
3441 int vm, m;
3442 src.split_code(&vm, &m);
3443 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
3444 0x5 * B9 | B8 | B6 | m * B5 | vm);
3445}
3446
3447
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003448void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
3449 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3450 // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
3451 // M(5) | 0(4) | Vm(3-0)
3452 DCHECK(CpuFeatures::IsSupported(ARMv8));
3453 int vd, d;
3454 dst.split_code(&vd, &d);
3455 int vm, m;
3456 src.split_code(&vm, &m);
3457 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
3458 vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
3459}
3460
3461
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003462void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
3463 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3464 // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3465 // M(5) | 0(4) | Vm(3-0)
3466 DCHECK(CpuFeatures::IsSupported(ARMv8));
3467 int vd, d;
3468 dst.split_code(&vd, &d);
3469 int vm, m;
3470 src.split_code(&vm, &m);
3471 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
3472 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3473}
3474
3475
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003476void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
3477 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3478 // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
3479 // M(5) | 0(4) | Vm(3-0)
3480 DCHECK(CpuFeatures::IsSupported(ARMv8));
3481 int vd, d;
3482 dst.split_code(&vd, &d);
3483 int vm, m;
3484 src.split_code(&vm, &m);
3485 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
3486 vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
3487}
3488
3489
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003490void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
3491 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3492 // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3493 // M(5) | 0(4) | Vm(3-0)
3494 DCHECK(CpuFeatures::IsSupported(ARMv8));
3495 int vd, d;
3496 dst.split_code(&vd, &d);
3497 int vm, m;
3498 src.split_code(&vm, &m);
3499 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
3500 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3501}
3502
3503
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003504void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
3505 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3506 // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
3507 // M(5) | 0(4) | Vm(3-0)
3508 DCHECK(CpuFeatures::IsSupported(ARMv8));
3509 int vd, d;
3510 dst.split_code(&vd, &d);
3511 int vm, m;
3512 src.split_code(&vm, &m);
3513 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
3514 vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
3515}
3516
3517
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003518void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
3519 // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
3520 // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
3521 // M(5) | 0(4) | Vm(3-0)
3522 DCHECK(CpuFeatures::IsSupported(ARMv8));
3523 int vd, d;
3524 dst.split_code(&vd, &d);
3525 int vm, m;
3526 src.split_code(&vm, &m);
3527 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
3528 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3529}
3530
3531
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003532void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
3533 const Condition cond) {
3534 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
3535 // Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3536 DCHECK(CpuFeatures::IsSupported(ARMv8));
3537 int vd, d;
3538 dst.split_code(&vd, &d);
3539 int vm, m;
3540 src.split_code(&vm, &m);
3541 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
3542 0x5 * B9 | B7 | B6 | m * B5 | vm);
3543}
3544
3545
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003546void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
3547 const Condition cond) {
3548 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
3549 // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
3550 DCHECK(CpuFeatures::IsSupported(ARMv8));
3551 int vd, d;
3552 dst.split_code(&vd, &d);
3553 int vm, m;
3554 src.split_code(&vm, &m);
3555 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
3556 0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
3557}
3558
3559
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003560// Support for NEON.
3561
3562void Assembler::vld1(NeonSize size,
3563 const NeonListOperand& dst,
3564 const NeonMemOperand& src) {
3565 // Instruction details available in ARM DDI 0406C.b, A8.8.320.
3566 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
3567 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3568 DCHECK(CpuFeatures::IsSupported(NEON));
3569 int vd, d;
3570 dst.base().split_code(&vd, &d);
3571 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
3572 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
3573}
3574
3575
3576void Assembler::vst1(NeonSize size,
3577 const NeonListOperand& src,
3578 const NeonMemOperand& dst) {
3579 // Instruction details available in ARM DDI 0406C.b, A8.8.404.
3580 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
3581 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
3582 DCHECK(CpuFeatures::IsSupported(NEON));
3583 int vd, d;
3584 src.base().split_code(&vd, &d);
3585 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
3586 size*B6 | dst.align()*B4 | dst.rm().code());
3587}
3588
3589
3590void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
3591 // Instruction details available in ARM DDI 0406C.b, A8.8.346.
3592 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
3593 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
3594 DCHECK(CpuFeatures::IsSupported(NEON));
3595 int vd, d;
3596 dst.split_code(&vd, &d);
3597 int vm, m;
3598 src.split_code(&vm, &m);
3599 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
3600 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
Steve Block8defd9f2010-07-08 12:39:36 +01003601}
3602
3603
Andrei Popescu31002712010-02-23 13:46:05 +00003604// Pseudo instructions.
Steve Block6ded16b2010-05-10 14:33:55 +01003605void Assembler::nop(int type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003606  // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
3607  // part of the CPU's pipeline and must issue. Older ARM chips simply used
3608  // MOV Rx, Rx as a NOP, and it performs better even on newer CPUs.
3609  // We therefore use MOV Rx, Rx even on newer CPUs, and use Rx to encode
3610  // a type.
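  // For example (illustrative), nop(2) assembles to "mov r2, r2", which
  // IsNop(instr, 2) later in this file recognizes.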
3611 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
Steve Block6ded16b2010-05-10 14:33:55 +01003612 emit(al | 13*B21 | type*B12 | type);
3613}
3614
3615
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003616bool Assembler::IsMovT(Instr instr) {
3617 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3618 ((kNumRegisters-1)*B12) | // mask out register
3619 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3620 return instr == kMovtPattern;
3621}
3622
3623
3624bool Assembler::IsMovW(Instr instr) {
3625 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3626 ((kNumRegisters-1)*B12) | // mask out destination
3627 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3628 return instr == kMovwPattern;
3629}
3630
3631
3632Instr Assembler::GetMovTPattern() { return kMovtPattern; }
3633
3634
3635Instr Assembler::GetMovWPattern() { return kMovwPattern; }
3636
3637
3638Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
3639 DCHECK(immediate < 0x10000);
3640 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
3641}
3642
3643
3644Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
3645 instruction &= ~EncodeMovwImmediate(0xffff);
3646 return instruction | EncodeMovwImmediate(immediate);
3647}
3648
3649
3650int Assembler::DecodeShiftImm(Instr instr) {
3651 int rotate = Instruction::RotateValue(instr) * 2;
3652 int immed8 = Instruction::Immed8Value(instr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003653 return base::bits::RotateRight32(immed8, rotate);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003654}
3655
3656
3657Instr Assembler::PatchShiftImm(Instr instr, int immed) {
3658 uint32_t rotate_imm = 0;
3659 uint32_t immed_8 = 0;
3660 bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
3661 DCHECK(immed_fits);
3662 USE(immed_fits);
3663 return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
3664}
3665
3666
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003667bool Assembler::IsNop(Instr instr, int type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003668 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
Steve Block1e0659c2011-05-24 12:43:12 +01003669 // Check for mov rx, rx where x = type.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003670 return instr == (al | 13*B21 | type*B12 | type);
3671}
3672
3673
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003674bool Assembler::IsMovImmed(Instr instr) {
3675 return (instr & kMovImmedMask) == kMovImmedPattern;
3676}
3677
3678
3679bool Assembler::IsOrrImmed(Instr instr) {
3680 return (instr & kOrrImmedMask) == kOrrImmedPattern;
3681}
3682
3683
3684// static
Steve Blockd0582a62009-12-15 09:54:21 +00003685bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
3686 uint32_t dummy1;
3687 uint32_t dummy2;
3688 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
3689}
3690
3691
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003692bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
3693 return is_uint12(abs(imm32));
3694}
3695
3696
Andrei Popescu31002712010-02-23 13:46:05 +00003697// Debugging.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003698void Assembler::RecordConstPool(int size) {
3699 // We only need this for debugger support, to correctly compute offsets in the
3700 // code.
3701 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
3702}
3703
3704
Steve Blocka7e24c12009-10-30 11:49:00 +00003705void Assembler::GrowBuffer() {
3706 if (!own_buffer_) FATAL("external code buffer is too small");
3707
Andrei Popescu31002712010-02-23 13:46:05 +00003708 // Compute new buffer size.
Steve Blocka7e24c12009-10-30 11:49:00 +00003709 CodeDesc desc; // the new buffer
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003710 if (buffer_size_ < 1 * MB) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003711 desc.buffer_size = 2*buffer_size_;
3712 } else {
3713 desc.buffer_size = buffer_size_ + 1*MB;
3714 }
3715 CHECK_GT(desc.buffer_size, 0); // no overflow
3716
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003717 // Set up new buffer.
Steve Blocka7e24c12009-10-30 11:49:00 +00003718 desc.buffer = NewArray<byte>(desc.buffer_size);
3719
3720 desc.instr_size = pc_offset();
3721 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003722 desc.origin = this;
Steve Blocka7e24c12009-10-30 11:49:00 +00003723
Andrei Popescu31002712010-02-23 13:46:05 +00003724 // Copy the data.
Steve Blocka7e24c12009-10-30 11:49:00 +00003725 int pc_delta = desc.buffer - buffer_;
3726 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003727 MemMove(desc.buffer, buffer_, desc.instr_size);
3728 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
3729 desc.reloc_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003730
Andrei Popescu31002712010-02-23 13:46:05 +00003731 // Switch buffers.
Steve Blocka7e24c12009-10-30 11:49:00 +00003732 DeleteArray(buffer_);
3733 buffer_ = desc.buffer;
3734 buffer_size_ = desc.buffer_size;
3735 pc_ += pc_delta;
3736 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
3737 reloc_info_writer.last_pc() + pc_delta);
3738
Andrei Popescu31002712010-02-23 13:46:05 +00003739 // None of our relocation types are pc relative pointing outside the code
Steve Blocka7e24c12009-10-30 11:49:00 +00003740 // buffer nor pc absolute pointing inside the code buffer, so there is no need
Andrei Popescu31002712010-02-23 13:46:05 +00003741 // to relocate any emitted relocation entries.
Steve Blocka7e24c12009-10-30 11:49:00 +00003742}
3743
3744
Ben Murdochb0fe1622011-05-05 13:52:32 +01003745void Assembler::db(uint8_t data) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003746 // db is used to write raw data. The constant pool should be emitted or
3747 // blocked before using db.
3748 DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
3749 DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
Ben Murdochb0fe1622011-05-05 13:52:32 +01003750 CheckBuffer();
3751 *reinterpret_cast<uint8_t*>(pc_) = data;
3752 pc_ += sizeof(uint8_t);
3753}
3754
3755
3756void Assembler::dd(uint32_t data) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003757 // dd is used to write raw data. The constant pool should be emitted or
3758 // blocked before using dd.
3759 DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
3760 DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
Ben Murdochb0fe1622011-05-05 13:52:32 +01003761 CheckBuffer();
3762 *reinterpret_cast<uint32_t*>(pc_) = data;
3763 pc_ += sizeof(uint32_t);
3764}
3765
3766
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003767void Assembler::dq(uint64_t value) {
3768 // dq is used to write raw data. The constant pool should be emitted or
3769 // blocked before using dq.
3770 DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
3771 DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
3772 CheckBuffer();
3773 *reinterpret_cast<uint64_t*>(pc_) = value;
3774 pc_ += sizeof(uint64_t);
3775}
3776
3777
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003778void Assembler::emit_code_stub_address(Code* stub) {
3779 CheckBuffer();
3780 *reinterpret_cast<uint32_t*>(pc_) =
3781 reinterpret_cast<uint32_t>(stub->instruction_start());
3782 pc_ += sizeof(uint32_t);
3783}
3784
3785
Steve Blocka7e24c12009-10-30 11:49:00 +00003786void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003787 if (RelocInfo::IsNone(rmode) ||
3788 // Don't record external references unless the heap will be serialized.
3789 (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
3790 !emit_debug_code())) {
3791 return;
3792 }
3793 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
3794 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
3795 data = RecordedAstId().ToInt();
3796 ClearRecordedAstId();
3797 }
3798 RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
3799 reloc_info_writer.Write(&rinfo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003800}
3801
3802
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003803ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
3804 RelocInfo::Mode rmode,
3805 intptr_t value) {
3806 DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
3807 rmode != RelocInfo::STATEMENT_POSITION &&
3808 rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
3809 bool sharing_ok = RelocInfo::IsNone(rmode) ||
3810 !(serializer_enabled() || rmode < RelocInfo::CELL);
3811 if (FLAG_enable_embedded_constant_pool) {
3812 return constant_pool_builder_.AddEntry(position, value, sharing_ok);
3813 } else {
3814 DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
3815 if (num_pending_32_bit_constants_ == 0) {
3816 first_const_pool_32_use_ = position;
3817 } else if (num_pending_32_bit_constants_ == kMinNumPendingConstants &&
3818 pending_32_bit_constants_ ==
3819 &pending_32_bit_constants_buffer_[0]) {
3820 // Inline buffer is full, switch to dynamically allocated buffer.
3821 pending_32_bit_constants_ =
3822 new ConstantPoolEntry[kMaxNumPending32Constants];
3823 std::copy(&pending_32_bit_constants_buffer_[0],
3824 &pending_32_bit_constants_buffer_[kMinNumPendingConstants],
3825 &pending_32_bit_constants_[0]);
Steve Blocka7e24c12009-10-30 11:49:00 +00003826 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003827 ConstantPoolEntry entry(position, value, sharing_ok);
3828 pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
3829
3830 // Make sure the constant pool is not emitted in place of the next
3831 // instruction for which we just recorded relocation info.
3832 BlockConstPoolFor(1);
3833 return ConstantPoolEntry::REGULAR;
Steve Blocka7e24c12009-10-30 11:49:00 +00003834 }
3835}
3836
3837
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003838ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
3839 double value) {
3840 if (FLAG_enable_embedded_constant_pool) {
3841 return constant_pool_builder_.AddEntry(position, value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003842 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003843 DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
3844 if (num_pending_64_bit_constants_ == 0) {
3845 first_const_pool_64_use_ = position;
3846 } else if (num_pending_64_bit_constants_ == kMinNumPendingConstants &&
3847 pending_64_bit_constants_ ==
3848 &pending_64_bit_constants_buffer_[0]) {
3849 // Inline buffer is full, switch to dynamically allocated buffer.
3850 pending_64_bit_constants_ =
3851 new ConstantPoolEntry[kMaxNumPending64Constants];
3852 std::copy(&pending_64_bit_constants_buffer_[0],
3853 &pending_64_bit_constants_buffer_[kMinNumPendingConstants],
3854 &pending_64_bit_constants_[0]);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003855 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003856 ConstantPoolEntry entry(position, value);
3857 pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
3858
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003859 // Make sure the constant pool is not emitted in place of the next
3860 // instruction for which we just recorded relocation info.
3861 BlockConstPoolFor(1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003862 return ConstantPoolEntry::REGULAR;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003863 }
3864}
3865
3866
void Assembler::BlockConstPoolFor(int instructions) {
  if (FLAG_enable_embedded_constant_pool) {
    // Should be a no-op if using an embedded constant pool.
    DCHECK(num_pending_32_bit_constants_ == 0);
    DCHECK(num_pending_64_bit_constants_ == 0);
    return;
  }

  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
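    // Blocking for this long must not push any pending entry beyond the
    // pc-relative reach of its ldr (int pool) or vldr (FP pool).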
    DCHECK((num_pending_32_bit_constants_ == 0) ||
           (start - first_const_pool_32_use_ +
                num_pending_64_bit_constants_ * kDoubleSize <
            kMaxDistToIntPool));
    DCHECK((num_pending_64_bit_constants_ == 0) ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}


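// Emits the pending constant pool if forced, or if postponing emission any
// further would risk an entry drifting out of range of the load that uses it.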
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  if (FLAG_enable_embedded_constant_pool) {
    // Should be a no-op if using an embedded constant pool.
    DCHECK(num_pending_32_bit_constants_ == 0);
    DCHECK(num_pending_64_bit_constants_ == 0);
    return;
  }

  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if ((num_pending_32_bit_constants_ == 0) &&
      (num_pending_64_bit_constants_ == 0)) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (including the jump over the pool, the constant pool marker and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int estimated_size_after_marker =
      num_pending_32_bit_constants_ * kPointerSize;
  bool has_int_values = (num_pending_32_bit_constants_ > 0);
  bool has_fp_values = (num_pending_64_bit_constants_ > 0);
  bool require_64_bit_align = false;
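  // Doubles in the pool must be 8-byte aligned, so one extra padding word may
  // be needed right after the marker.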
  if (has_fp_values) {
    require_64_bit_align =
        !IsAligned(reinterpret_cast<intptr_t>(pc_ + size_up_to_marker),
                   kDoubleAlignment);
    if (require_64_bit_align) {
      estimated_size_after_marker += kInstrSize;
    }
    estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
  }
  int estimated_size = size_up_to_marker + estimated_size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next
  //    time the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in-order so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    DCHECK(has_fp_values || has_int_values);
    bool need_emit = false;
    if (has_fp_values) {
      // The 64-bit constants are always emitted before the 32-bit constants,
      // so we can ignore the effect of the 32-bit constants on estimated_size.
      int dist64 = pc_offset() + estimated_size -
                   num_pending_32_bit_constants_ * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    if (has_int_values) {
      int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
      if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
          (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
        need_emit = true;
      }
    }
    if (!need_emit) return;
  }

  // Deduplicate constants.
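  // Entries with equal values share one pool slot: duplicates are marked as
  // merged and their loads are later patched to point at the first copy.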
  int size_after_marker = estimated_size_after_marker;
  for (int i = 0; i < num_pending_64_bit_constants_; i++) {
    ConstantPoolEntry& entry = pending_64_bit_constants_[i];
    DCHECK(!entry.is_merged());
    for (int j = 0; j < i; j++) {
      if (entry.value64() == pending_64_bit_constants_[j].value64()) {
        DCHECK(!pending_64_bit_constants_[j].is_merged());
        entry.set_merged_index(j);
        size_after_marker -= kDoubleSize;
        break;
      }
    }
  }

  for (int i = 0; i < num_pending_32_bit_constants_; i++) {
    ConstantPoolEntry& entry = pending_32_bit_constants_[i];
    DCHECK(!entry.is_merged());
    if (!entry.sharing_ok()) continue;
    for (int j = 0; j < i; j++) {
      if (entry.value() == pending_32_bit_constants_[j].value()) {
        DCHECK(!pending_32_bit_constants_[j].is_merged());
        entry.set_merged_index(j);
        size_after_marker -= kPointerSize;
        break;
      }
    }
  }

  int size = size_up_to_marker + size_after_marker;

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    Label size_check;
    bind(&size_check);

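    // The pool emitted below has the following layout:
    //   b after_pool              (only if require_jump)
    //   <pool marker>             (undefined instruction encoding the length)
    //   <padding word>            (only if 64-bit alignment is required)
    //   <64-bit entries>
    //   <32-bit entries>
    //  after_pool: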
    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down constant pool marker "Undefined instruction".
    // The data size helps the disassembler know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // 32-bit entries.
    for (int i = 0; i < num_pending_64_bit_constants_; i++) {
      ConstantPoolEntry& entry = pending_64_bit_constants_[i];

      Instr instr = instr_at(entry.position());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with
      // offset == 0.
      DCHECK((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_offset() - entry.position() - kPcLoadDelta;
      DCHECK(is_uint10(delta));

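      // A merged entry reuses the offset already patched into the first load
      // of the same value, adjusted by the distance between the two loads.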
      if (entry.is_merged()) {
        ConstantPoolEntry& merged =
            pending_64_bit_constants_[entry.merged_index()];
        DCHECK(entry.value64() == merged.value64());
        Instr merged_instr = instr_at(merged.position());
        DCHECK(IsVldrDPcImmediateOffset(merged_instr));
        delta = GetVldrDRegisterImmediateOffset(merged_instr);
        delta += merged.position() - entry.position();
      }
      instr_at_put(entry.position(),
                   SetVldrDRegisterImmediateOffset(instr, delta));
      if (!entry.is_merged()) {
        DCHECK(IsAligned(reinterpret_cast<intptr_t>(pc_), kDoubleAlignment));
        dq(entry.value64());
      }
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_32_bit_constants_; i++) {
      ConstantPoolEntry& entry = pending_32_bit_constants_[i];
      Instr instr = instr_at(entry.position());

      // 64-bit loads shouldn't get here.
      DCHECK(!IsVldrDPcImmediateOffset(instr));
      DCHECK(!IsMovW(instr));
      DCHECK(IsLdrPcImmediateOffset(instr) &&
             GetLdrRegisterImmediateOffset(instr) == 0);

      int delta = pc_offset() - entry.position() - kPcLoadDelta;
      DCHECK(is_uint12(delta));
      // 0 is the smallest delta:
      //   ldr rd, [pc, #0]
      //   constant pool marker
      //   data

      if (entry.is_merged()) {
        DCHECK(entry.sharing_ok());
        ConstantPoolEntry& merged =
            pending_32_bit_constants_[entry.merged_index()];
        DCHECK(entry.value() == merged.value());
        Instr merged_instr = instr_at(merged.position());
        DCHECK(IsLdrPcImmediateOffset(merged_instr));
        delta = GetLdrRegisterImmediateOffset(merged_instr);
        delta += merged.position() - entry.position();
      }
      instr_at_put(entry.position(),
                   SetLdrRegisterImmediateOffset(instr, delta));
      if (!entry.is_merged()) {
        emit(entry.value());
      }
    }

    num_pending_32_bit_constants_ = 0;
    num_pending_64_bit_constants_ = 0;
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}


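// Used only with the embedded constant pool: patches the access sequence at
// |pc_offset| to refer to the pool entry at |offset|. Overflowed entries get
// the full offset via movw/movt (or a mov/orr chain pre-ARMv7); regular
// entries get it as the immediate of a pp-relative ldr/vldr.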
void Assembler::PatchConstantPoolAccessInstruction(
    int pc_offset, int offset, ConstantPoolEntry::Access access,
    ConstantPoolEntry::Type type) {
  DCHECK(FLAG_enable_embedded_constant_pool);
  Address pc = buffer_ + pc_offset;

  // Patch vldr/ldr instruction with correct offset.
  Instr instr = instr_at(pc);
  if (access == ConstantPoolEntry::OVERFLOWED) {
    if (CpuFeatures::IsSupported(ARMv7)) {
      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
      Instr next_instr = instr_at(pc + kInstrSize);
      DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
      DCHECK((IsMovT(next_instr) &&
              Instruction::ImmedMovwMovtValue(next_instr) == 0));
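      // movw receives the low 16 bits of the offset, movt the high 16 bits.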
      instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
      instr_at_put(pc + kInstrSize,
                   PatchMovwImmediate(next_instr, offset >> 16));
    } else {
      // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
      Instr instr_2 = instr_at(pc + kInstrSize);
      Instr instr_3 = instr_at(pc + 2 * kInstrSize);
      Instr instr_4 = instr_at(pc + 3 * kInstrSize);
      DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
      DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
             GetRn(instr_2).is(GetRd(instr_2)));
      DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
             GetRn(instr_3).is(GetRd(instr_3)));
      DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
             GetRn(instr_4).is(GetRd(instr_4)));
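      // Each instruction materializes one byte of the 32-bit offset: mov sets
      // bits 0-7, the three orrs merge in bits 8-15, 16-23 and 24-31.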
      instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
      instr_at_put(pc + kInstrSize,
                   PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
      instr_at_put(pc + 2 * kInstrSize,
                   PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
      instr_at_put(pc + 3 * kInstrSize,
                   PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
    }
  } else if (type == ConstantPoolEntry::DOUBLE) {
    // Instruction to patch must be 'vldr rd, [pp, #0]'.
    DCHECK((IsVldrDPpImmediateOffset(instr) &&
            GetVldrDRegisterImmediateOffset(instr) == 0));
    DCHECK(is_uint10(offset));
    instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
  } else {
    // Instruction to patch must be 'ldr rd, [pp, #0]'.
    DCHECK((IsLdrPpImmediateOffset(instr) &&
            GetLdrRegisterImmediateOffset(instr) == 0));
    DCHECK(is_uint12(offset));
    instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM