blob: 546025475ebff61ff3c6ed960f487315ff6a8a61 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
6#define V8_ARM64_ASSEMBLER_ARM64_H_
7
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00008#include <deque>
Ben Murdochb8a8cc12014-11-26 15:28:44 +00009#include <list>
10#include <map>
11#include <vector>
12
13#include "src/arm64/instructions-arm64.h"
14#include "src/assembler.h"
15#include "src/globals.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000016#include "src/utils.h"
17
18
19namespace v8 {
20namespace internal {
21
22
// -----------------------------------------------------------------------------
// Registers.
// clang-format off

// Expands R(n) for every general register code, 0 through 31.
#define GENERAL_REGISTER_CODE_LIST(R)                     \
  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)          \
  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15)         \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)         \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

// Expands R(xn) for every 64-bit general register name.
#define GENERAL_REGISTERS(R)                              \
  R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
  R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
  R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
  R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)

// General registers available to the register allocator: x0-x15, x18-x24 and
// x27. The omitted registers have fixed roles — see the ALIAS_REGISTER section
// below (ip0/ip1, root, jssp, fp, lr, xzr) and the Crankshaft notes in
// struct Register.
#define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
  R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
  R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
  R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)

// Expands R(dn) for every 64-bit floating-point register name.
#define DOUBLE_REGISTERS(R)                               \
  R(d0)  R(d1)  R(d2)  R(d3)  R(d4)  R(d5)  R(d6)  R(d7)  \
  R(d8)  R(d9)  R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
  R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
  R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)

// FP registers available to the register allocator. Excluded: d15 (holds the
// 0.0 constant), d29 (Crankshaft scratch) and d30/d31 (MacroAssembler
// scratch) — see the FP register aliases below.
#define ALLOCATABLE_DOUBLE_REGISTERS(R)                   \
  R(d0)  R(d1)  R(d2)  R(d3)  R(d4)  R(d5)  R(d6)  R(d7)  \
  R(d8)  R(d9)  R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
  R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
  R(d25) R(d26) R(d27) R(d28)
// clang-format on
Ben Murdochb8a8cc12014-11-26 15:28:44 +000055
// Width of a RegList bit mask: one bit per register code.
static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;


// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
struct Register;
struct FPRegister;
63
64
// Base description of an ARM64 register: an encoding (reg_code), a width in
// bits (reg_size) and a type (general-purpose, floating-point or none).
// CPURegister is a POD so that the static register constants defined below
// can be zero-initialized without running any constructors.
struct CPURegister {
  // One enumerator per general register name (kCode_x0 ... kCode_x31).
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
        kAfterLast,
    kCode_no_reg = -1
  };

  enum RegisterType {
    // The kInvalid value is used to detect uninitialized static instances,
    // which are always zero-initialized before any constructors are called.
    kInvalid = 0,
    kRegister,
    kFPRegister,
    kNoRegister
  };

  // Aggregate factory, used instead of a constructor so the struct stays POD.
  static CPURegister Create(int code, int size, RegisterType type) {
    CPURegister r = {code, size, type};
    return r;
  }

  int code() const;
  RegisterType type() const;
  // RegList mask with this register's bit set; masks from several registers
  // are combined with bitwise-or (see CPURegList).
  RegList Bit() const;
  int SizeInBits() const;
  int SizeInBytes() const;
  bool Is32Bits() const;
  bool Is64Bits() const;
  bool IsValid() const;
  bool IsValidOrNone() const;
  bool IsValidRegister() const;
  bool IsValidFPRegister() const;
  bool IsNone() const;
  bool Is(const CPURegister& other) const;
  bool Aliases(const CPURegister& other) const;

  bool IsZero() const;
  bool IsSP() const;

  bool IsRegister() const;
  bool IsFPRegister() const;

  // Fixed-width views of this register: X/W give the 64-/32-bit general
  // registers, D/S the 64-/32-bit FP registers.
  Register X() const;
  Register W() const;
  FPRegister D() const;
  FPRegister S() const;

  bool IsSameSizeAndType(const CPURegister& other) const;

  // V8 compatibility.
  bool is(const CPURegister& other) const { return Is(other); }
  bool is_valid() const { return IsValid(); }

  int reg_code;            // Register number (instruction encoding).
  int reg_size;            // Width in bits.
  RegisterType reg_type;
};
124
125
// A general-purpose (integer) register: a CPURegister whose type is
// kRegister. Same layout as CPURegister (asserted below), so the two can
// alias in static storage.
struct Register : public CPURegister {
  static Register Create(int code, int size) {
    return Register(CPURegister::Create(code, size, CPURegister::kRegister));
  }

  // Default-constructed registers are "none" (no-register) values.
  Register() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit Register(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  Register(const Register& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  const char* ToString();
  bool IsAllocatable() const;
  bool IsValid() const {
    DCHECK(IsRegister() || IsNone());
    return IsValidRegister();
  }

  // Return the 64-bit (x) or 32-bit (w) general register with the given code.
  static Register XRegFromCode(unsigned code);
  static Register WRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  // These members are necessary for compilation.
  // A few of them may be unused for now.

  static const int kNumRegisters = kNumberOfRegisters;
  STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
  static int NumRegisters() { return kNumRegisters; }

  // We allow crankshaft to use the following registers:
  // - x0 to x15
  // - x18 to x24
  // - x27 (also context)
  //
  // TODO(all): Register x25 is currently free and could be available for
  // crankshaft, but we don't use it as we might use it as a per function
  // literal pool pointer in the future.
  //
  // TODO(all): Consider storing cp in x25 to have only two ranges.
  // We split allocatable registers in three ranges called
  // - "low range"
  // - "high range"
  // - "context"

  static Register from_code(int code) {
    // Always return an X register.
    return Register::Create(code, kXRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};
191
192
// A floating-point register: a CPURegister whose type is kFPRegister. Same
// layout as CPURegister (asserted below).
struct FPRegister : public CPURegister {
  // One enumerator per FP register name (kCode_d0 ... kCode_d31).
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
        kAfterLast,
    kCode_no_reg = -1
  };

  static FPRegister Create(int code, int size) {
    return FPRegister(
        CPURegister::Create(code, size, CPURegister::kFPRegister));
  }

  // Default-constructed FPRegisters are "none" (no-register) values.
  FPRegister() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit FPRegister(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  const char* ToString();
  bool IsAllocatable() const;
  bool IsValid() const {
    DCHECK(IsFPRegister() || IsNone());
    return IsValidFPRegister();
  }

  // Return the 32-bit (s) or 64-bit (d) FP register with the given code.
  static FPRegister SRegFromCode(unsigned code);
  static FPRegister DRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  static const int kMaxNumRegisters = kNumberOfFPRegisters;
  STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);

  // Crankshaft can use all the FP registers except:
  // - d15 which is used to keep the 0 double value
  // - d30 which is used in crankshaft as a double scratch register
  // - d31 which is used in the MacroAssembler as a double scratch register
  static FPRegister from_code(int code) {
    // Always return a D register.
    return FPRegister::Create(code, kDRegSizeInBits);
  }
  // End of V8 compatibility section -----------------------
};
251
252
// The register structs all share one layout, so static CPURegister storage
// can safely be viewed as Register or FPRegister (see the reinterpret_casts
// below).
STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));


// When ARM64_DEFINE_REG_STATICS is set (in exactly one translation unit), the
// register constants are defined; everywhere else they are only declared.
// The backing objects are plain CPURegister aggregates so they can be
// zero-initialized without constructors; the typed references reinterpret
// that storage, which the size asserts above make safe.
#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type)      \
  const CPURegister init_##register_class##_##name = {code, size, type}; \
  const register_class& name = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#define ALIAS_REGISTER(register_class, alias, name)                       \
  const register_class& alias = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#else
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
  extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
  extern const register_class& alias
#endif  // defined(ARM64_DEFINE_REG_STATICS)

// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);

// v8 compatibility.
INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);

// Define w<N> (32-bit) and x<N> (64-bit) views of every general register.
#define DEFINE_REGISTERS(N) \
  INITIALIZE_REGISTER(Register, w##N, N, \
                      kWRegSizeInBits, CPURegister::kRegister); \
  INITIALIZE_REGISTER(Register, x##N, N, \
                      kXRegSizeInBits, CPURegister::kRegister);
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS

// The stack pointer uses a dedicated internal code rather than 31 (code 31
// is the zero register xzr/wzr in the aliases below).
INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
                    CPURegister::kRegister);
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
                    CPURegister::kRegister);

// Define s<N> (32-bit) and d<N> (64-bit) views of every FP register.
#define DEFINE_FPREGISTERS(N) \
  INITIALIZE_REGISTER(FPRegister, s##N, N, \
                      kSRegSizeInBits, CPURegister::kFPRegister); \
  INITIALIZE_REGISTER(FPRegister, d##N, N, \
                      kDRegSizeInBits, CPURegister::kFPRegister);
GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS

#undef INITIALIZE_REGISTER

// Registers aliases.
ALIAS_REGISTER(Register, ip0, x16);
ALIAS_REGISTER(Register, ip1, x17);
ALIAS_REGISTER(Register, wip0, w16);
ALIAS_REGISTER(Register, wip1, w17);
// Root register.
ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
// We use a register as a JS stack pointer to overcome the restriction on the
// architectural SP alignment.
// We chose x28 because it is contiguous with the other specific purpose
// registers.
STATIC_ASSERT(kJSSPCode == 28);
ALIAS_REGISTER(Register, jssp, x28);
ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);

// Keeps the 0 double value.
ALIAS_REGISTER(FPRegister, fp_zero, d15);
// Crankshaft double scratch register.
ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
// MacroAssembler double scratch registers.
ALIAS_REGISTER(FPRegister, fp_scratch, d30);
ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
ALIAS_REGISTER(FPRegister, fp_scratch2, d31);

#undef ALIAS_REGISTER
337
338
// Returns an allocatable register distinct from all of the given registers
// (unused arguments default to NoReg and are ignored).
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
                                              Register reg2 = NoReg,
                                              Register reg3 = NoReg,
                                              Register reg4 = NoReg);


// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
                const CPURegister& reg2,
                const CPURegister& reg3 = NoReg,
                const CPURegister& reg4 = NoReg,
                const CPURegister& reg5 = NoReg,
                const CPURegister& reg6 = NoReg,
                const CPURegister& reg7 = NoReg,
                const CPURegister& reg8 = NoReg);

// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
                        const CPURegister& reg2,
                        const CPURegister& reg3 = NoCPUReg,
                        const CPURegister& reg4 = NoCPUReg,
                        const CPURegister& reg5 = NoCPUReg,
                        const CPURegister& reg6 = NoCPUReg,
                        const CPURegister& reg7 = NoCPUReg,
                        const CPURegister& reg8 = NoCPUReg);


// On this port, double values live in FP (d) registers.
typedef FPRegister DoubleRegister;

// TODO(arm64) Define SIMD registers.
typedef FPRegister Simd128Register;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000374
// -----------------------------------------------------------------------------
// Lists of registers.
//
// A CPURegList is a set of registers that all share one type (general-purpose
// or floating-point) and one size, stored as a RegList bit mask.
class CPURegList {
 public:
  // Build a list from up to four registers. All non-NoCPUReg arguments must
  // have the same size and type.
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
        size_(reg1.SizeInBits()), type_(reg1.type()) {
    DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    DCHECK(IsValid());
  }

  // Build a list directly from a RegList bit mask.
  CPURegList(CPURegister::RegisterType type, int size, RegList list)
      : list_(list), size_(size), type_(type) {
    DCHECK(IsValid());
  }

  // Build a list covering the inclusive register-code range
  // [first_reg, last_reg].
  CPURegList(CPURegister::RegisterType type, int size, int first_reg,
             int last_reg)
      : size_(size), type_(type) {
    DCHECK(((type == CPURegister::kRegister) &&
            (last_reg < kNumberOfRegisters)) ||
           ((type == CPURegister::kFPRegister) &&
            (last_reg < kNumberOfFPRegisters)));
    DCHECK(last_reg >= first_reg);
    // Set bits first_reg..last_reg inclusive, clear all others.
    list_ = (1UL << (last_reg + 1)) - 1;
    list_ &= ~((1UL << first_reg) - 1);
    DCHECK(IsValid());
  }

  CPURegister::RegisterType type() const {
    DCHECK(IsValid());
    return type_;
  }

  RegList list() const {
    DCHECK(IsValid());
    return list_;
  }

  inline void set_list(RegList new_list) {
    DCHECK(IsValid());
    list_ = new_list;
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other);

  // Remove every register in the other CPURegList from this one. Registers that
  // do not exist in this list are ignored. The type of the registers in the
  // 'other' list must match those in this list.
  void Remove(const CPURegList& other);

  // Variants of Combine and Remove which take CPURegisters.
  void Combine(const CPURegister& other);
  void Remove(const CPURegister& other1,
              const CPURegister& other2 = NoCPUReg,
              const CPURegister& other3 = NoCPUReg,
              const CPURegister& other4 = NoCPUReg);

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code);
  void Remove(int code);

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  // Remove and return the lowest-/highest-coded register in the list.
  CPURegister PopLowestIndex();
  CPURegister PopHighestIndex();

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
  static CPURegList GetCalleeSavedFP(int size = kDRegSizeInBits);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
  static CPURegList GetCallerSavedFP(int size = kDRegSizeInBits);

  // Registers saved as safepoints.
  static CPURegList GetSafepointSavedRegisters();

  bool IsEmpty() const {
    DCHECK(IsValid());
    return list_ == 0;
  }

  // Returns true if any of the given registers is a member of this list.
  // Only registers of this list's type are considered; sizes are ignored.
  bool IncludesAliasOf(const CPURegister& other1,
                       const CPURegister& other2 = NoCPUReg,
                       const CPURegister& other3 = NoCPUReg,
                       const CPURegister& other4 = NoCPUReg) const {
    DCHECK(IsValid());
    RegList list = 0;
    if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
    if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
    if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
    if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
    return (list_ & list) != 0;
  }

  // Number of registers in the list.
  int Count() const {
    DCHECK(IsValid());
    return CountSetBits(list_, kRegListSizeInBits);
  }

  // Common size, in bits, of every register in the list.
  int RegisterSizeInBits() const {
    DCHECK(IsValid());
    return size_;
  }

  int RegisterSizeInBytes() const {
    int size_in_bits = RegisterSizeInBits();
    DCHECK((size_in_bits % kBitsPerByte) == 0);
    return size_in_bits / kBitsPerByte;
  }

  // Total size of all registers in the list, e.g. for sizing a save area.
  int TotalSizeInBytes() const {
    DCHECK(IsValid());
    return RegisterSizeInBytes() * Count();
  }

 private:
  RegList list_;
  int size_;                          // Common register width, in bits.
  CPURegister::RegisterType type_;

  bool IsValid() const {
    // Masks of the bits that may legitimately be set for each register type.
    // NOTE(review): the extra high bit in kValidRegisters presumably
    // corresponds to the internal stack-pointer code (kSPRegInternalCode) —
    // confirm against the constants header.
    const RegList kValidRegisters = 0x8000000ffffffff;
    const RegList kValidFPRegisters = 0x0000000ffffffff;
    switch (type_) {
      case CPURegister::kRegister:
        return (list_ & kValidRegisters) == list_;
      case CPURegister::kFPRegister:
        return (list_ & kValidFPRegisters) == list_;
      case CPURegister::kNoRegister:
        return list_ == 0;
      default:
        UNREACHABLE();
        return false;
    }
  }
};
522
523
// AAPCS64 callee-saved registers. These macros expand to freshly-built
// CPURegList values.
#define kCalleeSaved CPURegList::GetCalleeSaved()
#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()


// AAPCS64 caller-saved registers. Note that this includes lr.
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()
532
// -----------------------------------------------------------------------------
// Immediates.
//
// Bundles a 64-bit immediate value with the relocation mode required to emit
// it correctly.
class Immediate {
 public:
  // Build an immediate from a heap object handle (see InitializeHandle).
  template<typename T>
  inline explicit Immediate(Handle<T> handle);

  // This is allowed to be an implicit constructor because Immediate is
  // a wrapper class that doesn't normally perform any type conversion.
  template<typename T>
  inline Immediate(T value);  // NOLINT(runtime/explicit)

  template<typename T>
  inline Immediate(T value, RelocInfo::Mode rmode);

  int64_t value() const { return value_; }
  RelocInfo::Mode rmode() const { return rmode_; }

 private:
  void InitializeHandle(Handle<Object> value);

  int64_t value_;
  RelocInfo::Mode rmode_;
};
557
558
// -----------------------------------------------------------------------------
// Operands.

// Total shift applied to a Smi payload, and a mask covering the shifted-out
// (tag) bits.
const int kSmiShift = kSmiTagSize + kSmiShiftSize;
const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;

// Represents an operand in a machine instruction: an immediate (possibly
// with relocation), a shifted register, or an extended register.
class Operand {
  // TODO(all): If necessary, study more in details which methods
  // TODO(all): should be inlined or not.
 public:
  // rm, {<shift> {#<shift_amount>}}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  inline Operand(Register reg,
                 Shift shift = LSL,
                 unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, <extend> {#<shift_amount>}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  inline Operand(Register reg,
                 Extend extend,
                 unsigned shift_amount = 0);

  template<typename T>
  inline explicit Operand(Handle<T> handle);

  // Implicit constructor for all int types, ExternalReference, and Smi.
  template<typename T>
  inline Operand(T t);  // NOLINT(runtime/explicit)

  // Implicit constructor for int types.
  template<typename T>
  inline Operand(T t, RelocInfo::Mode rmode);

  // Queries for the operand's flavour.
  inline bool IsImmediate() const;
  inline bool IsShiftedRegister() const;
  inline bool IsExtendedRegister() const;
  inline bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  inline Operand ToExtendedRegister() const;

  inline Immediate immediate() const;
  inline int64_t ImmediateValue() const;
  inline Register reg() const;
  inline Shift shift() const;
  inline Extend extend() const;
  inline unsigned shift_amount() const;

  // Relocation information.
  bool NeedsRelocation(const Assembler* assembler) const;

  // Helpers building operands that strip the Smi tag from a register value
  // (optionally scaling the untagged value).
  inline static Operand UntagSmi(Register smi);
  inline static Operand UntagSmiAndScale(Register smi, int scale);

 private:
  Immediate immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};
626
627
// MemOperand represents a memory operand in a load or store instruction.
// Supported forms: [base, #imm] with an addressing mode (see IsPreIndex /
// IsPostIndex), and [base, regoffset {, shift/extend #amount}].
class MemOperand {
 public:
  inline MemOperand();
  // [base, #offset], with the addressing mode given by addrmode.
  inline explicit MemOperand(Register base,
                             int64_t offset = 0,
                             AddrMode addrmode = Offset);
  // [base, regoffset, <shift> #shift_amount].
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Shift shift = LSL,
                             unsigned shift_amount = 0);
  // [base, regoffset, <extend> #shift_amount].
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Extend extend,
                             unsigned shift_amount = 0);
  // As above, with the offset supplied as an Operand.
  inline explicit MemOperand(Register base,
                             const Operand& offset,
                             AddrMode addrmode = Offset);

  const Register& base() const { return base_; }
  const Register& regoffset() const { return regoffset_; }
  int64_t offset() const { return offset_; }
  AddrMode addrmode() const { return addrmode_; }
  Shift shift() const { return shift_; }
  Extend extend() const { return extend_; }
  unsigned shift_amount() const { return shift_amount_; }
  inline bool IsImmediateOffset() const;
  inline bool IsRegisterOffset() const;
  inline bool IsPreIndex() const;
  inline bool IsPostIndex() const;

  // For offset modes, return the offset as an Operand. This helper cannot
  // handle indexed modes.
  inline Operand OffsetAsOperand() const;

  enum PairResult {
    kNotPair,   // Can't use a pair instruction.
    kPairAB,    // Can use a pair instruction (operandA has lower address).
    kPairBA     // Can use a pair instruction (operandB has lower address).
  };
  // Check if two MemOperand are consistent for stp/ldp use.
  static PairResult AreConsistentForPair(const MemOperand& operandA,
                                         const MemOperand& operandB,
                                         int access_size_log2 = kXRegSizeLog2);

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};
682
683
// Constant (literal) pool for the assembler. Values recorded via RecordEntry
// are buffered and later emitted as a pool; entries whose relocation mode
// allows it (see CanBeShared) are de-duplicated.
class ConstPool {
 public:
  explicit ConstPool(Assembler* assm)
      : assm_(assm),
        first_use_(-1),  // -1: no pending entry recorded yet.
        shared_entries_count(0) {}
  // Record a pending pool entry with the given value and relocation mode.
  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
  // Number of pool slots currently pending (shared entries count once).
  int EntryCount() const {
    return shared_entries_count + static_cast<int>(unique_entries_.size());
  }
  bool IsEmpty() const {
    return shared_entries_.empty() && unique_entries_.empty();
  }
  // Distance in bytes between the current pc and the first instruction
  // using the pool. If there are no pending entries return kMaxInt.
  int DistanceToFirstUse();
  // Offset after which instructions using the pool will be out of range.
  int MaxPcOffset();
  // Maximum size the constant pool can be with current entries. It always
  // includes alignment padding and branch over.
  int WorstCaseSize();
  // Size in bytes of the literal pool *if* it is emitted at the current
  // pc. The size will include the branch over the pool if it was requested.
  int SizeIfEmittedAtCurrentPc(bool require_jump);
  // Emit the literal pool at the current pc with a branch over the pool if
  // requested.
  void Emit(bool require_jump);
  // Discard any pending pool entries.
  void Clear();

 private:
  // Whether an entry with this relocation mode may share a slot with other
  // entries holding the same value.
  bool CanBeShared(RelocInfo::Mode mode);
  void EmitMarker();
  void EmitGuard();
  void EmitEntries();

  Assembler* assm_;
  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_use_;
  // values, pc offset(s) of entries which can be shared.
  std::multimap<uint64_t, int> shared_entries_;
  // Number of distinct literal in shared entries.
  int shared_entries_count;
  // values, pc offset of entries which cannot be shared.
  std::vector<std::pair<uint64_t, int> > unique_entries_;
};
731
732
733// -----------------------------------------------------------------------------
734// Assembler.
735
736class Assembler : public AssemblerBase {
737 public:
738 // Create an assembler. Instructions and relocation information are emitted
739 // into a buffer, with the instructions starting from the beginning and the
740 // relocation information starting from the end of the buffer. See CodeDesc
741 // for a detailed comment on the layout (globals.h).
742 //
743 // If the provided buffer is NULL, the assembler allocates and grows its own
744 // buffer, and buffer_size determines the initial buffer size. The buffer is
745 // owned by the assembler and deallocated upon destruction of the assembler.
746 //
747 // If the provided buffer is not NULL, the assembler uses the provided buffer
748 // for code generation and assumes its size to be buffer_size. If the buffer
749 // is too small, a fatal error occurs. No deallocation of the buffer is done
750 // upon destruction of the assembler.
751 Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
752
753 virtual ~Assembler();
754
755 virtual void AbortedCodeGeneration() {
756 constpool_.Clear();
757 }
758
759 // System functions ---------------------------------------------------------
760 // Start generating code from the beginning of the buffer, discarding any code
761 // and data that has already been emitted into the buffer.
762 //
763 // In order to avoid any accidental transfer of state, Reset DCHECKs that the
764 // constant pool is not blocked.
765 void Reset();
766
767 // GetCode emits any pending (non-emitted) code and fills the descriptor
768 // desc. GetCode() is idempotent; it returns the same result if no other
769 // Assembler functions are invoked in between GetCode() calls.
770 //
771 // The descriptor (desc) can be NULL. In that case, the code is finalized as
772 // usual, but the descriptor is not populated.
773 void GetCode(CodeDesc* desc);
774
775 // Insert the smallest number of nop instructions
776 // possible to align the pc offset to a multiple
777 // of m. m must be a power of 2 (>= 4).
778 void Align(int m);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000779 // Insert the smallest number of zero bytes possible to align the pc offset
780 // to a mulitple of m. m must be a power of 2 (>= 2).
781 void DataAlign(int m);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000782
783 inline void Unreachable();
784
785 // Label --------------------------------------------------------------------
786 // Bind a label to the current pc. Note that labels can only be bound once,
787 // and if labels are linked to other instructions, they _must_ be bound
788 // before they go out of scope.
789 void bind(Label* label);
790
791
792 // RelocInfo and pools ------------------------------------------------------
793
794 // Record relocation information for current pc_.
795 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
796
797 // Return the address in the constant pool of the code target address used by
798 // the branch/call instruction at pc.
799 inline static Address target_pointer_address_at(Address pc);
800
801 // Read/Modify the code target address in the branch/call instruction at pc.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000802 inline static Address target_address_at(Address pc, Address constant_pool);
803 inline static void set_target_address_at(
804 Isolate* isolate, Address pc, Address constant_pool, Address target,
805 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000806 static inline Address target_address_at(Address pc, Code* code);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000807 static inline void set_target_address_at(
808 Isolate* isolate, Address pc, Code* code, Address target,
809 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000810
811 // Return the code target address at a call site from the return address of
812 // that call in the instruction stream.
813 inline static Address target_address_from_return_address(Address pc);
814
815 // Given the address of the beginning of a call, return the address in the
816 // instruction stream that call will return from.
817 inline static Address return_address_from_call_start(Address pc);
818
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000819 // This sets the branch destination (which is in the constant pool on ARM).
820 // This is for calls and branches within generated code.
821 inline static void deserialization_set_special_target_at(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000822 Isolate* isolate, Address constant_pool_entry, Code* code,
823 Address target);
824
825 // This sets the internal reference at the pc.
826 inline static void deserialization_set_target_internal_reference_at(
827 Isolate* isolate, Address pc, Address target,
828 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000829
830 // All addresses in the constant pool are the same size as pointers.
831 static const int kSpecialTargetSize = kPointerSize;
832
833 // The sizes of the call sequences emitted by MacroAssembler::Call.
834 // Wherever possible, use MacroAssembler::CallSize instead of these constants,
835 // as it will choose the correct value for a given relocation mode.
836 //
837 // Without relocation:
838 // movz temp, #(target & 0x000000000000ffff)
839 // movk temp, #(target & 0x00000000ffff0000)
840 // movk temp, #(target & 0x0000ffff00000000)
841 // blr temp
842 //
843 // With relocation:
844 // ldr temp, =target
845 // blr temp
846 static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
847 static const int kCallSizeWithRelocation = 2 * kInstructionSize;
848
849 // Size of the generated code in bytes
  uint64_t SizeOfGeneratedCode() const {
    // The write cursor must lie inside the buffer; catches overrun or a
    // stale pc_.
    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;  // Byte distance from buffer start to write cursor.
  }
854
855 // Return the code size generated from label to the current position.
  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
    // Only meaningful for a label already bound at or before the current
    // write position.
    DCHECK(label->is_bound());
    DCHECK(pc_offset() >= label->pos());
    DCHECK(pc_offset() < buffer_size_);
    return pc_offset() - label->pos();  // Byte distance from label to here.
  }
862
863 // Check the size of the code generated since the given label. This function
864 // is used primarily to work around comparisons between signed and unsigned
865 // quantities, since V8 uses both.
866 // TODO(jbramley): Work out what sign to use for these things and if possible,
867 // change things to be consistent.
  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
    // The cast avoids a signed/unsigned comparison; see the TODO above about
    // making the sign conventions consistent.
    DCHECK(size >= 0);
    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
  }
872
873 // Return the number of instructions generated from label to the
874 // current position.
  uint64_t InstructionsGeneratedSince(const Label* label) {
    // All ARM64 instructions are kInstructionSize bytes, so the byte count
    // divides evenly into an instruction count.
    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
  }
878
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000879 static const int kPatchDebugBreakSlotAddressOffset = 0;
880
881 // Number of instructions necessary to be able to later patch it to a call.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000882 static const int kDebugBreakSlotInstructions = 5;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000883 static const int kDebugBreakSlotLength =
884 kDebugBreakSlotInstructions * kInstructionSize;
885
  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
889 void StartBlockConstPool();
890
  // Resume constant pool emission. Needs to be called as many times as
892 // StartBlockConstPool to have an effect.
893 void EndBlockConstPool();
894
895 bool is_const_pool_blocked() const;
896 static bool IsConstantPoolAt(Instruction* instr);
897 static int ConstantPoolSizeAt(Instruction* instr);
898 // See Assembler::CheckConstPool for more info.
899 void EmitPoolGuard();
900
  // Prevent veneer pool emission until EndBlockVeneerPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockVeneerPool.
904 void StartBlockVeneerPool();
905
  // Resume veneer pool emission. Needs to be called as many times as
907 // StartBlockVeneerPool to have an effect.
908 void EndBlockVeneerPool();
909
  // True while at least one StartBlockVeneerPool() call has not yet been
  // matched by an EndBlockVeneerPool().
  bool is_veneer_pool_blocked() const {
    return veneer_pool_blocked_nesting_ > 0;
  }
913
914 // Block/resume emission of constant pools and veneer pools.
  void StartBlockPools() {
    // Convenience wrapper: blocks both pool kinds in one call.
    StartBlockConstPool();
    StartBlockVeneerPool();
  }
  void EndBlockPools() {
    // Releases one nesting level of each pool block.
    EndBlockConstPool();
    EndBlockVeneerPool();
  }
923
924 // Debugging ----------------------------------------------------------------
  // Accessor for the recorder that tracks source positions in the generated
  // code.
  AssemblerPositionsRecorder* positions_recorder() {
    return &positions_recorder_;
  }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000928 void RecordComment(const char* msg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000929
930 // Record a deoptimization reason that can be used by a log or cpu profiler.
931 // Use --trace-deopt to enable.
Ben Murdoch097c5b22016-05-18 11:27:45 +0100932 void RecordDeoptReason(const int reason, int raw_position);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000933
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000934 int buffer_space() const;
935
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000936 // Mark generator continuation.
937 void RecordGeneratorContinuation();
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000938
939 // Mark address of a debug break slot.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000940 void RecordDebugBreakSlot(RelocInfo::Mode mode);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000941
942 // Record the emission of a constant pool.
943 //
944 // The emission of constant and veneer pools depends on the size of the code
945 // generated and the number of RelocInfo recorded.
946 // The Debug mechanism needs to map code offsets between two versions of a
947 // function, compiled with and without debugger support (see for example
948 // Debug::PrepareForBreakPoints()).
949 // Compiling functions with debugger support generates additional code
950 // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
951 // and cause the version of the code with debugger support to have pools
952 // generated in different places.
953 // Recording the position and size of emitted pools allows to correctly
954 // compute the offset mappings between the different versions of a function in
955 // all situations.
956 //
957 // The parameter indicates the size of the pool (in bytes), including
958 // the marker and branch over the data.
959 void RecordConstPool(int size);
960
961
962 // Instruction set functions ------------------------------------------------
963
964 // Branch / Jump instructions.
  // For branches, offsets are scaled, i.e. they are in instructions, not in
  // bytes.
966 // Branch to register.
967 void br(const Register& xn);
968
969 // Branch-link to register.
970 void blr(const Register& xn);
971
972 // Branch to register with return hint.
973 void ret(const Register& xn = lr);
974
975 // Unconditional branch to label.
976 void b(Label* label);
977
978 // Conditional branch to label.
979 void b(Label* label, Condition cond);
980
981 // Unconditional branch to PC offset.
982 void b(int imm26);
983
984 // Conditional branch to PC offset.
985 void b(int imm19, Condition cond);
986
987 // Branch-link to label / pc offset.
988 void bl(Label* label);
989 void bl(int imm26);
990
991 // Compare and branch to label / pc offset if zero.
992 void cbz(const Register& rt, Label* label);
993 void cbz(const Register& rt, int imm19);
994
995 // Compare and branch to label / pc offset if not zero.
996 void cbnz(const Register& rt, Label* label);
997 void cbnz(const Register& rt, int imm19);
998
999 // Test bit and branch to label / pc offset if zero.
1000 void tbz(const Register& rt, unsigned bit_pos, Label* label);
1001 void tbz(const Register& rt, unsigned bit_pos, int imm14);
1002
1003 // Test bit and branch to label / pc offset if not zero.
1004 void tbnz(const Register& rt, unsigned bit_pos, Label* label);
1005 void tbnz(const Register& rt, unsigned bit_pos, int imm14);
1006
1007 // Address calculation instructions.
1008 // Calculate a PC-relative address. Unlike for branches the offset in adr is
1009 // unscaled (i.e. the result can be unaligned).
1010 void adr(const Register& rd, Label* label);
1011 void adr(const Register& rd, int imm21);
1012
1013 // Data Processing instructions.
1014 // Add.
1015 void add(const Register& rd,
1016 const Register& rn,
1017 const Operand& operand);
1018
1019 // Add and update status flags.
1020 void adds(const Register& rd,
1021 const Register& rn,
1022 const Operand& operand);
1023
1024 // Compare negative.
1025 void cmn(const Register& rn, const Operand& operand);
1026
1027 // Subtract.
1028 void sub(const Register& rd,
1029 const Register& rn,
1030 const Operand& operand);
1031
1032 // Subtract and update status flags.
1033 void subs(const Register& rd,
1034 const Register& rn,
1035 const Operand& operand);
1036
1037 // Compare.
1038 void cmp(const Register& rn, const Operand& operand);
1039
1040 // Negate.
1041 void neg(const Register& rd,
1042 const Operand& operand);
1043
1044 // Negate and update status flags.
1045 void negs(const Register& rd,
1046 const Operand& operand);
1047
1048 // Add with carry bit.
1049 void adc(const Register& rd,
1050 const Register& rn,
1051 const Operand& operand);
1052
1053 // Add with carry bit and update status flags.
1054 void adcs(const Register& rd,
1055 const Register& rn,
1056 const Operand& operand);
1057
1058 // Subtract with carry bit.
1059 void sbc(const Register& rd,
1060 const Register& rn,
1061 const Operand& operand);
1062
1063 // Subtract with carry bit and update status flags.
1064 void sbcs(const Register& rd,
1065 const Register& rn,
1066 const Operand& operand);
1067
1068 // Negate with carry bit.
1069 void ngc(const Register& rd,
1070 const Operand& operand);
1071
1072 // Negate with carry bit and update status flags.
1073 void ngcs(const Register& rd,
1074 const Operand& operand);
1075
1076 // Logical instructions.
1077 // Bitwise and (A & B).
1078 void and_(const Register& rd,
1079 const Register& rn,
1080 const Operand& operand);
1081
1082 // Bitwise and (A & B) and update status flags.
1083 void ands(const Register& rd,
1084 const Register& rn,
1085 const Operand& operand);
1086
1087 // Bit test, and set flags.
1088 void tst(const Register& rn, const Operand& operand);
1089
1090 // Bit clear (A & ~B).
1091 void bic(const Register& rd,
1092 const Register& rn,
1093 const Operand& operand);
1094
1095 // Bit clear (A & ~B) and update status flags.
1096 void bics(const Register& rd,
1097 const Register& rn,
1098 const Operand& operand);
1099
1100 // Bitwise or (A | B).
1101 void orr(const Register& rd, const Register& rn, const Operand& operand);
1102
  // Bitwise or with inverted operand (A | ~B).
1104 void orn(const Register& rd, const Register& rn, const Operand& operand);
1105
1106 // Bitwise eor/xor (A ^ B).
1107 void eor(const Register& rd, const Register& rn, const Operand& operand);
1108
  // Bitwise exclusive-nor / xnor (A ^ ~B).
1110 void eon(const Register& rd, const Register& rn, const Operand& operand);
1111
1112 // Logical shift left variable.
1113 void lslv(const Register& rd, const Register& rn, const Register& rm);
1114
1115 // Logical shift right variable.
1116 void lsrv(const Register& rd, const Register& rn, const Register& rm);
1117
1118 // Arithmetic shift right variable.
1119 void asrv(const Register& rd, const Register& rn, const Register& rm);
1120
1121 // Rotate right variable.
1122 void rorv(const Register& rd, const Register& rn, const Register& rm);
1123
1124 // Bitfield instructions.
1125 // Bitfield move.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001126 void bfm(const Register& rd, const Register& rn, int immr, int imms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001127
1128 // Signed bitfield move.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001129 void sbfm(const Register& rd, const Register& rn, int immr, int imms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001130
1131 // Unsigned bitfield move.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001132 void ubfm(const Register& rd, const Register& rn, int immr, int imms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001133
1134 // Bfm aliases.
1135 // Bitfield insert.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001136 void bfi(const Register& rd, const Register& rn, int lsb, int width) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001137 DCHECK(width >= 1);
1138 DCHECK(lsb + width <= rn.SizeInBits());
1139 bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1140 }
1141
1142 // Bitfield extract and insert low.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001143 void bfxil(const Register& rd, const Register& rn, int lsb, int width) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001144 DCHECK(width >= 1);
1145 DCHECK(lsb + width <= rn.SizeInBits());
1146 bfm(rd, rn, lsb, lsb + width - 1);
1147 }
1148
1149 // Sbfm aliases.
1150 // Arithmetic shift right.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001151 void asr(const Register& rd, const Register& rn, int shift) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001152 DCHECK(shift < rd.SizeInBits());
1153 sbfm(rd, rn, shift, rd.SizeInBits() - 1);
1154 }
1155
1156 // Signed bitfield insert in zero.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001157 void sbfiz(const Register& rd, const Register& rn, int lsb, int width) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001158 DCHECK(width >= 1);
1159 DCHECK(lsb + width <= rn.SizeInBits());
1160 sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1161 }
1162
1163 // Signed bitfield extract.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001164 void sbfx(const Register& rd, const Register& rn, int lsb, int width) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001165 DCHECK(width >= 1);
1166 DCHECK(lsb + width <= rn.SizeInBits());
1167 sbfm(rd, rn, lsb, lsb + width - 1);
1168 }
1169
1170 // Signed extend byte.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);  // Sign-extend bits [7:0] of rn into rd.
  }
1174
1175 // Signed extend halfword.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);  // Sign-extend bits [15:0] of rn into rd.
  }
1179
1180 // Signed extend word.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);  // Sign-extend bits [31:0] of rn into rd.
  }
1184
1185 // Ubfm aliases.
1186 // Logical shift left.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001187 void lsl(const Register& rd, const Register& rn, int shift) {
1188 int reg_size = rd.SizeInBits();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001189 DCHECK(shift < reg_size);
1190 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
1191 }
1192
1193 // Logical shift right.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001194 void lsr(const Register& rd, const Register& rn, int shift) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001195 DCHECK(shift < rd.SizeInBits());
1196 ubfm(rd, rn, shift, rd.SizeInBits() - 1);
1197 }
1198
1199 // Unsigned bitfield insert in zero.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001200 void ubfiz(const Register& rd, const Register& rn, int lsb, int width) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001201 DCHECK(width >= 1);
1202 DCHECK(lsb + width <= rn.SizeInBits());
1203 ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1204 }
1205
1206 // Unsigned bitfield extract.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001207 void ubfx(const Register& rd, const Register& rn, int lsb, int width) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001208 DCHECK(width >= 1);
1209 DCHECK(lsb + width <= rn.SizeInBits());
1210 ubfm(rd, rn, lsb, lsb + width - 1);
1211 }
1212
1213 // Unsigned extend byte.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);  // Zero-extend bits [7:0] of rn into rd.
  }
1217
1218 // Unsigned extend halfword.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);  // Zero-extend bits [15:0] of rn into rd.
  }
1222
1223 // Unsigned extend word.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);  // Zero-extend bits [31:0] of rn into rd.
  }
1227
1228 // Extract.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001229 void extr(const Register& rd, const Register& rn, const Register& rm,
1230 int lsb);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001231
1232 // Conditional select: rd = cond ? rn : rm.
1233 void csel(const Register& rd,
1234 const Register& rn,
1235 const Register& rm,
1236 Condition cond);
1237
1238 // Conditional select increment: rd = cond ? rn : rm + 1.
1239 void csinc(const Register& rd,
1240 const Register& rn,
1241 const Register& rm,
1242 Condition cond);
1243
1244 // Conditional select inversion: rd = cond ? rn : ~rm.
1245 void csinv(const Register& rd,
1246 const Register& rn,
1247 const Register& rm,
1248 Condition cond);
1249
1250 // Conditional select negation: rd = cond ? rn : -rm.
1251 void csneg(const Register& rd,
1252 const Register& rn,
1253 const Register& rm,
1254 Condition cond);
1255
1256 // Conditional set: rd = cond ? 1 : 0.
1257 void cset(const Register& rd, Condition cond);
1258
1259 // Conditional set minus: rd = cond ? -1 : 0.
1260 void csetm(const Register& rd, Condition cond);
1261
1262 // Conditional increment: rd = cond ? rn + 1 : rn.
1263 void cinc(const Register& rd, const Register& rn, Condition cond);
1264
1265 // Conditional invert: rd = cond ? ~rn : rn.
1266 void cinv(const Register& rd, const Register& rn, Condition cond);
1267
1268 // Conditional negate: rd = cond ? -rn : rn.
1269 void cneg(const Register& rd, const Register& rn, Condition cond);
1270
1271 // Extr aliases.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    // Extracting from the pair (rs:rs) yields a rotate right by <shift>.
    extr(rd, rs, rs, shift);
  }
1275
1276 // Conditional comparison.
1277 // Conditional compare negative.
1278 void ccmn(const Register& rn,
1279 const Operand& operand,
1280 StatusFlags nzcv,
1281 Condition cond);
1282
1283 // Conditional compare.
1284 void ccmp(const Register& rn,
1285 const Operand& operand,
1286 StatusFlags nzcv,
1287 Condition cond);
1288
1289 // Multiplication.
1290 // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
1291 void mul(const Register& rd, const Register& rn, const Register& rm);
1292
1293 // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
1294 void madd(const Register& rd,
1295 const Register& rn,
1296 const Register& rm,
1297 const Register& ra);
1298
1299 // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
1300 void mneg(const Register& rd, const Register& rn, const Register& rm);
1301
1302 // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
1303 void msub(const Register& rd,
1304 const Register& rn,
1305 const Register& rm,
1306 const Register& ra);
1307
1308 // 32 x 32 -> 64-bit multiply.
1309 void smull(const Register& rd, const Register& rn, const Register& rm);
1310
1311 // Xd = bits<127:64> of Xn * Xm.
1312 void smulh(const Register& rd, const Register& rn, const Register& rm);
1313
1314 // Signed 32 x 32 -> 64-bit multiply and accumulate.
1315 void smaddl(const Register& rd,
1316 const Register& rn,
1317 const Register& rm,
1318 const Register& ra);
1319
1320 // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
1321 void umaddl(const Register& rd,
1322 const Register& rn,
1323 const Register& rm,
1324 const Register& ra);
1325
1326 // Signed 32 x 32 -> 64-bit multiply and subtract.
1327 void smsubl(const Register& rd,
1328 const Register& rn,
1329 const Register& rm,
1330 const Register& ra);
1331
1332 // Unsigned 32 x 32 -> 64-bit multiply and subtract.
1333 void umsubl(const Register& rd,
1334 const Register& rn,
1335 const Register& rm,
1336 const Register& ra);
1337
1338 // Signed integer divide.
1339 void sdiv(const Register& rd, const Register& rn, const Register& rm);
1340
1341 // Unsigned integer divide.
1342 void udiv(const Register& rd, const Register& rn, const Register& rm);
1343
1344 // Bit count, bit reverse and endian reverse.
1345 void rbit(const Register& rd, const Register& rn);
1346 void rev16(const Register& rd, const Register& rn);
1347 void rev32(const Register& rd, const Register& rn);
1348 void rev(const Register& rd, const Register& rn);
1349 void clz(const Register& rd, const Register& rn);
1350 void cls(const Register& rd, const Register& rn);
1351
1352 // Memory instructions.
1353
1354 // Load integer or FP register.
1355 void ldr(const CPURegister& rt, const MemOperand& src);
1356
1357 // Store integer or FP register.
1358 void str(const CPURegister& rt, const MemOperand& dst);
1359
1360 // Load word with sign extension.
1361 void ldrsw(const Register& rt, const MemOperand& src);
1362
1363 // Load byte.
1364 void ldrb(const Register& rt, const MemOperand& src);
1365
1366 // Store byte.
1367 void strb(const Register& rt, const MemOperand& dst);
1368
1369 // Load byte with sign extension.
1370 void ldrsb(const Register& rt, const MemOperand& src);
1371
1372 // Load half-word.
1373 void ldrh(const Register& rt, const MemOperand& src);
1374
1375 // Store half-word.
1376 void strh(const Register& rt, const MemOperand& dst);
1377
1378 // Load half-word with sign extension.
1379 void ldrsh(const Register& rt, const MemOperand& src);
1380
1381 // Load integer or FP register pair.
1382 void ldp(const CPURegister& rt, const CPURegister& rt2,
1383 const MemOperand& src);
1384
1385 // Store integer or FP register pair.
1386 void stp(const CPURegister& rt, const CPURegister& rt2,
1387 const MemOperand& dst);
1388
1389 // Load word pair with sign extension.
1390 void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
1391
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001392 // Load literal to register from a pc relative address.
1393 void ldr_pcrel(const CPURegister& rt, int imm19);
1394
1395 // Load literal to register.
1396 void ldr(const CPURegister& rt, const Immediate& imm);
1397
1398 // Move instructions. The default shift of -1 indicates that the move
1399 // instruction will calculate an appropriate 16-bit immediate and left shift
1400 // that is equal to the 64-bit immediate argument. If an explicit left shift
1401 // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
1402 //
1403 // For movk, an explicit shift can be used to indicate which half word should
1404 // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
1405 // half word with zero, whereas movk(x0, 0, 48) will overwrite the
1406 // most-significant.
1407
1408 // Move and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    // MOVK: overwrite one 16-bit half-word of rd, keeping the others (see
    // the shift-selection comment above).
    MoveWide(rd, imm, shift, MOVK);
  }
1412
1413 // Move with non-zero.
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    // MOVN variant of MoveWide (materializes the inverted wide immediate).
    MoveWide(rd, imm, shift, MOVN);
  }
1417
1418 // Move with zero.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    // MOVZ: zero rd, then insert the shifted 16-bit immediate.
    MoveWide(rd, imm, shift, MOVZ);
  }
1422
1423 // Misc instructions.
1424 // Monitor debug-mode breakpoint.
1425 void brk(int code);
1426
1427 // Halting debug-mode breakpoint.
1428 void hlt(int code);
1429
1430 // Move register to register.
1431 void mov(const Register& rd, const Register& rn);
1432
1433 // Move NOT(operand) to register.
1434 void mvn(const Register& rd, const Operand& operand);
1435
1436 // System instructions.
1437 // Move to register from system register.
1438 void mrs(const Register& rt, SystemRegister sysreg);
1439
1440 // Move from register to system register.
1441 void msr(SystemRegister sysreg, const Register& rt);
1442
1443 // System hint.
1444 void hint(SystemHint code);
1445
1446 // Data memory barrier
1447 void dmb(BarrierDomain domain, BarrierType type);
1448
1449 // Data synchronization barrier
1450 void dsb(BarrierDomain domain, BarrierType type);
1451
1452 // Instruction synchronization barrier
1453 void isb();
1454
1455 // Alias for system instructions.
1456 void nop() { hint(NOP); }
1457
1458 // Different nop operations are used by the code generator to detect certain
1459 // states of the generated code.
1460 enum NopMarkerTypes {
1461 DEBUG_BREAK_NOP,
1462 INTERRUPT_CODE_NOP,
1463 ADR_FAR_NOP,
1464 FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
1465 LAST_NOP_MARKER = ADR_FAR_NOP
1466 };
1467
  void nop(NopMarkerTypes n) {
    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
    // Encode the marker as "mov xN, xN": a no-op whose register number
    // identifies the marker type when the code is inspected later.
    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
  }
1472
1473 // FP instructions.
1474 // Move immediate to FP register.
1475 void fmov(FPRegister fd, double imm);
1476 void fmov(FPRegister fd, float imm);
1477
1478 // Move FP register to register.
1479 void fmov(Register rd, FPRegister fn);
1480
1481 // Move register to FP register.
1482 void fmov(FPRegister fd, Register rn);
1483
1484 // Move FP register to FP register.
1485 void fmov(FPRegister fd, FPRegister fn);
1486
1487 // FP add.
1488 void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1489
1490 // FP subtract.
1491 void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1492
1493 // FP multiply.
1494 void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1495
1496 // FP fused multiply and add.
1497 void fmadd(const FPRegister& fd,
1498 const FPRegister& fn,
1499 const FPRegister& fm,
1500 const FPRegister& fa);
1501
1502 // FP fused multiply and subtract.
1503 void fmsub(const FPRegister& fd,
1504 const FPRegister& fn,
1505 const FPRegister& fm,
1506 const FPRegister& fa);
1507
1508 // FP fused multiply, add and negate.
1509 void fnmadd(const FPRegister& fd,
1510 const FPRegister& fn,
1511 const FPRegister& fm,
1512 const FPRegister& fa);
1513
1514 // FP fused multiply, subtract and negate.
1515 void fnmsub(const FPRegister& fd,
1516 const FPRegister& fn,
1517 const FPRegister& fm,
1518 const FPRegister& fa);
1519
1520 // FP divide.
1521 void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1522
1523 // FP maximum.
1524 void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1525
1526 // FP minimum.
1527 void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1528
1529 // FP maximum.
1530 void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1531
1532 // FP minimum.
1533 void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1534
1535 // FP absolute.
1536 void fabs(const FPRegister& fd, const FPRegister& fn);
1537
1538 // FP negate.
1539 void fneg(const FPRegister& fd, const FPRegister& fn);
1540
1541 // FP square root.
1542 void fsqrt(const FPRegister& fd, const FPRegister& fn);
1543
1544 // FP round to integer (nearest with ties to away).
1545 void frinta(const FPRegister& fd, const FPRegister& fn);
1546
1547 // FP round to integer (toward minus infinity).
1548 void frintm(const FPRegister& fd, const FPRegister& fn);
1549
1550 // FP round to integer (nearest with ties to even).
1551 void frintn(const FPRegister& fd, const FPRegister& fn);
1552
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001553 // FP round to integer (towards plus infinity).
1554 void frintp(const FPRegister& fd, const FPRegister& fn);
1555
  // FP round to integer (towards zero).
1557 void frintz(const FPRegister& fd, const FPRegister& fn);
1558
1559 // FP compare registers.
1560 void fcmp(const FPRegister& fn, const FPRegister& fm);
1561
1562 // FP compare immediate.
1563 void fcmp(const FPRegister& fn, double value);
1564
1565 // FP conditional compare.
1566 void fccmp(const FPRegister& fn,
1567 const FPRegister& fm,
1568 StatusFlags nzcv,
1569 Condition cond);
1570
1571 // FP conditional select.
1572 void fcsel(const FPRegister& fd,
1573 const FPRegister& fn,
1574 const FPRegister& fm,
1575 Condition cond);
1576
1577 // Common FP Convert function
1578 void FPConvertToInt(const Register& rd,
1579 const FPRegister& fn,
1580 FPIntegerConvertOp op);
1581
1582 // FP convert between single and double precision.
1583 void fcvt(const FPRegister& fd, const FPRegister& fn);
1584
1585 // Convert FP to unsigned integer (nearest with ties to away).
1586 void fcvtau(const Register& rd, const FPRegister& fn);
1587
1588 // Convert FP to signed integer (nearest with ties to away).
1589 void fcvtas(const Register& rd, const FPRegister& fn);
1590
1591 // Convert FP to unsigned integer (round towards -infinity).
1592 void fcvtmu(const Register& rd, const FPRegister& fn);
1593
1594 // Convert FP to signed integer (round towards -infinity).
1595 void fcvtms(const Register& rd, const FPRegister& fn);
1596
1597 // Convert FP to unsigned integer (nearest with ties to even).
1598 void fcvtnu(const Register& rd, const FPRegister& fn);
1599
1600 // Convert FP to signed integer (nearest with ties to even).
1601 void fcvtns(const Register& rd, const FPRegister& fn);
1602
1603 // Convert FP to unsigned integer (round towards zero).
1604 void fcvtzu(const Register& rd, const FPRegister& fn);
1605
  // Convert FP to signed integer (round towards zero).
1607 void fcvtzs(const Register& rd, const FPRegister& fn);
1608
1609 // Convert signed integer or fixed point to FP.
1610 void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1611
1612 // Convert unsigned integer or fixed point to FP.
1613 void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1614
1615 // Instruction functions used only for test, debug, and patching.
1616 // Emit raw instructions in the instruction stream.
1617 void dci(Instr raw_inst) { Emit(raw_inst); }
1618
1619 // Emit 8 bits of data in the instruction stream.
1620 void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
1621
1622 // Emit 32 bits of data in the instruction stream.
1623 void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
1624
1625 // Emit 64 bits of data in the instruction stream.
1626 void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
1627
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001628 // Emit an address in the instruction stream.
1629 void dcptr(Label* label);
1630
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001631 // Copy a string into the instruction stream, including the terminating NULL
1632 // character. The instruction pointer (pc_) is then aligned correctly for
1633 // subsequent instructions.
1634 void EmitStringData(const char* string);
1635
1636 // Pseudo-instructions ------------------------------------------------------
1637
1638 // Parameters are described in arm64/instructions-arm64.h.
1639 void debug(const char* message, uint32_t code, Instr params = BREAK);
1640
1641 // Required by V8.
1642 void dd(uint32_t data) { dc32(data); }
1643 void db(uint8_t data) { dc8(data); }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001644 void dq(uint64_t data) { dc64(data); }
1645 void dp(uintptr_t data) { dc64(data); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001646
1647 // Code generation helpers --------------------------------------------------
1648
1649 bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
1650
1651 Instruction* pc() const { return Instruction::Cast(pc_); }
1652
  // Return the instruction at the given byte offset from the buffer start.
  Instruction* InstructionAt(ptrdiff_t offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }
1656
  // Inverse of InstructionAt(): byte offset of an instruction within the
  // buffer.
  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }
1660
1661 // Register encoding.
  static Instr Rd(CPURegister rd) {
    // The stack pointer carries an internal pseudo-code and cannot be
    // encoded here; instructions accepting csp must use RdSP() instead.
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }
1666
1667 static Instr Rn(CPURegister rn) {
1668 DCHECK(rn.code() != kSPRegInternalCode);
1669 return rn.code() << Rn_offset;
1670 }
1671
1672 static Instr Rm(CPURegister rm) {
1673 DCHECK(rm.code() != kSPRegInternalCode);
1674 return rm.code() << Rm_offset;
1675 }
1676
1677 static Instr Ra(CPURegister ra) {
1678 DCHECK(ra.code() != kSPRegInternalCode);
1679 return ra.code() << Ra_offset;
1680 }
1681
1682 static Instr Rt(CPURegister rt) {
1683 DCHECK(rt.code() != kSPRegInternalCode);
1684 return rt.code() << Rt_offset;
1685 }
1686
1687 static Instr Rt2(CPURegister rt2) {
1688 DCHECK(rt2.code() != kSPRegInternalCode);
1689 return rt2.code() << Rt2_offset;
1690 }
1691
1692 // These encoding functions allow the stack pointer to be encoded, and
1693 // disallow the zero register.
  static Instr RdSP(Register rd) {
    // Allows csp but disallows the zero register; kRegCodeMask keeps only
    // the bits that belong in the register field.
    DCHECK(!rd.IsZero());
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }
1698
  static Instr RnSP(Register rn) {
    // Allows csp but disallows the zero register; kRegCodeMask keeps only
    // the bits that belong in the register field.
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }
1703
  // Flags encoding.
  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  // The numeric suffix of each parameter name gives the width in bits of the
  // immediate field being encoded.
  inline static Instr ImmPCRelAddress(int imm21);

  // Branch encoding.
  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  // Immediate predicates: whether a value is directly encodable in the
  // corresponding instruction form.
  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);

  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  // Load/store offset encodability predicates.
  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
  static bool IsImmLLiteral(int64_t offset);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(int imm);
  inline static Instr ShiftMoveWide(int shift);

  // FP Immediates.
  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);
1768
  // Class for scoping postponing the constant pool generation.
  // RAII helper: constant pool emission is blocked for the lifetime of an
  // instance (StartBlockConstPool in the constructor, EndBlockConstPool in
  // the destructor). Scopes may nest.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };
1784
  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  // Part of the embedded-constant-pool interface shared across back-ends;
  // ARM64 does not use embedded constant pools, so this must never be called.
  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001794
  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach to specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
  // As above, but for the unresolved branch with the smallest reachable limit.
  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
  }

  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future for example if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;

  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the code.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  // Protect an emitted veneer pool from being executed as instructions.
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);
1819
  // RAII helper that blocks emission of both the constant pool and the
  // veneer pool for its lifetime (StartBlockPools / EndBlockPools).
  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockPools();
    }
    ~BlockPoolsScope() {
      assem_->EndBlockPools();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
1834
 protected:
  // Return the zero register (wzr or xzr) matching the size of |reg|.
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  // Common emission path for single-register load/store instructions.
  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  // Common emission path for load/store-pair instructions.
  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);

  // Logical operations (and, orr, eor, ...) with a generic operand.
  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  // Logical operation with a pre-encoded bitmask immediate (n, imm_s, imm_r).
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  // Conditional compare (ccmp/ccmn) emission.
  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);

  // Add/subtract with carry (adc/sbc) emission.
  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);

  // Functions for emulating operands not directly supported by the instruction
  // set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  // Add/subtract emission with a generic operand.
  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);

  // Whether |imm| is representable as an FP instruction immediate.
  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);

  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);

  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used for other branches in the
  // chain if the link chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);
1907
 private:
  // Instruction helpers.
  // Wide-immediate moves (movz/movn/movk).
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  // Data-processing with a shifted-register operand.
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  // Data-processing with an extended-register operand.
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);

  // Label helpers.

  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but return an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);

  // Sentinel offset marking the start of a label's link chain.
  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);

  // Record a literal for later emission in the constant pool.
  void RecordLiteral(int64_t imm, unsigned size);

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);
1969
1970 // Set how far from current pc the next constant pool check will be.
1971 void SetNextConstPoolCheckIn(int instructions) {
1972 next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
1973 }
1974
  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    // memcpy is used rather than a direct store so the write is valid even
    // if pc_ is not suitably aligned for an Instr access.
    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    CheckBuffer();
  }
1985
1986 // Emit data inline in the instruction stream.
1987 void EmitData(void const * data, unsigned size) {
1988 DCHECK(sizeof(*pc_) == 1);
1989 DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
1990
1991 // TODO(all): Somehow register we have some data here. Then we can
1992 // disassemble it correctly.
1993 memcpy(pc_, data, size);
1994 pc_ += size;
1995 CheckBuffer();
1996 }
1997
  // Grow the code buffer when the remaining space drops below kGap.
  void GrowBuffer();
  void CheckBufferSpace();
  void CheckBuffer();

  // Pc offset of the next constant pool check.
  int next_constant_pool_check_;

  // Constant pool generation
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance or
  //  * the number of entries in the pool is above a pre-defined size or
  //  * code generation is finished
  // If a pool needs to be emitted before code generation is finished a branch
  // over the emitted pool will be inserted.

  // Constants in the pool may be addresses of functions that gets relocated;
  // if so, a relocation info entry is associated to the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstPoolInterval = 128;

  // Distance to first use after which a pool will be emitted. Pool entries
  // are accessed with pc relative load therefore this cannot be more than
  // 1 * MB. Since constant pool emission checks are interval based this value
  // is an approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;

  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval based this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;
  // Internal reference positions, required for (potential) patching in
  // GrowBuffer(); contains only those internal references whose labels
  // are already bound.
  std::deque<int> internal_reference_positions_;

  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are temporarily
  // stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.

  // The pending constant pool.
  ConstPool constpool_;

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();
2065
 protected:
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information. Any previously recorded id must already
  // have been consumed (see the DCHECK).
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries, and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;
2081
 public:
  // Record of a forward branch whose target may end up out of range; used by
  // the veneer emission machinery below.
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };
2092
 protected:
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The second member gives information about the unresolved branch. The first
  // member of the pair is the maximum offset that the branch can reach in the
  // buffer. The map is sorted according to this reachable offset, allowing to
  // easily check when veneers need to be emitted.
  // Note that the maximum reachable offset (first member of the pairs) should
  // always be positive but has the same type as the return value for
  // pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;

  // We generate a veneer for a branch if we reach within this distance of the
  // limit of the range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a finger in the air guess. With a default margin of
  // 1KB, that leaves us an additional 256 instructions to avoid generating a
  // protective branch.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
  // Smallest maximum-reachable offset among the unresolved branches. Requires
  // at least one unresolved branch to exist.
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }
  // This is similar to next_constant_pool_check_ and helps reduce the overhead
  // of checking for veneer pools.
  // It is maintained to the closest unresolved branch limit minus the maximum
  // veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;
2126
 private:
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly to
  // the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, and for each PC-relative instruction in the chain checking
  // if pending unresolved information exists. Its complexity is proportional to
  // the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);

 private:
  AssemblerPositionsRecorder positions_recorder_;
  friend class AssemblerPositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
2144};
2145
class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is
  //   size of instructions to patch + kGap
  // Where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
      : Assembler(isolate, reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool and veneer pool emission while patching.
    StartBlockPools();
  }

  PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
      : Assembler(isolate, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the Instruction cache.
    size_t length = buffer_size_ - kGap;
    Assembler::FlushICache(isolate(), buffer_, length);
  }

  // See definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};
2186
2187
class EnsureSpace BASE_EMBEDDED {
 public:
  // Run the assembler's buffer-space check up front so the following
  // emission sequence does not have to check for overflow itself.
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
2194
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002195} // namespace internal
2196} // namespace v8
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002197
2198#endif // V8_ARM64_ASSEMBLER_ARM64_H_