blob: 47786eb710a712f6f2f3c7db24d67e15e4b1498f [file] [log] [blame]
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
6#define V8_ARM64_ASSEMBLER_ARM64_H_
7
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00008#include <deque>
Ben Murdochb8a8cc12014-11-26 15:28:44 +00009#include <list>
10#include <map>
11#include <vector>
12
13#include "src/arm64/instructions-arm64.h"
14#include "src/assembler.h"
15#include "src/globals.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000016#include "src/utils.h"
17
18
19namespace v8 {
20namespace internal {
21
22
// -----------------------------------------------------------------------------
// Registers.
// clang-format off

// Raw register codes 0..31, used to stamp out per-code definitions.
#define GENERAL_REGISTER_CODE_LIST(R)                     \
  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)          \
  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15)         \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)         \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

// All 64-bit general-purpose registers, by name.
#define GENERAL_REGISTERS(R)                              \
  R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
  R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
  R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
  R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)

// The subset of general-purpose registers the register allocator may use.
#define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
  R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
  R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
  R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)

// All double-precision floating-point registers, by name.
#define DOUBLE_REGISTERS(R)                               \
  R(d0)  R(d1)  R(d2)  R(d3)  R(d4)  R(d5)  R(d6)  R(d7)  \
  R(d8)  R(d9)  R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
  R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
  R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)

// The subset of FP registers the register allocator may use.
#define ALLOCATABLE_DOUBLE_REGISTERS(R)                   \
  R(d0)  R(d1)  R(d2)  R(d3)  R(d4)  R(d5)  R(d6)  R(d7)  \
  R(d8)  R(d9)  R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
  R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
  R(d25) R(d26) R(d27) R(d28)
// clang-format on
Ben Murdochb8a8cc12014-11-26 15:28:44 +000055
56static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
57
58
59// Some CPURegister methods can return Register and FPRegister types, so we
60// need to declare them in advance.
61struct Register;
62struct FPRegister;
63
64
65struct CPURegister {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000066 enum Code {
67#define REGISTER_CODE(R) kCode_##R,
68 GENERAL_REGISTERS(REGISTER_CODE)
69#undef REGISTER_CODE
70 kAfterLast,
71 kCode_no_reg = -1
72 };
73
Ben Murdochb8a8cc12014-11-26 15:28:44 +000074 enum RegisterType {
75 // The kInvalid value is used to detect uninitialized static instances,
76 // which are always zero-initialized before any constructors are called.
77 kInvalid = 0,
78 kRegister,
79 kFPRegister,
80 kNoRegister
81 };
82
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000083 static CPURegister Create(int code, int size, RegisterType type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +000084 CPURegister r = {code, size, type};
85 return r;
86 }
87
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000088 int code() const;
Ben Murdochb8a8cc12014-11-26 15:28:44 +000089 RegisterType type() const;
90 RegList Bit() const;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000091 int SizeInBits() const;
Ben Murdochb8a8cc12014-11-26 15:28:44 +000092 int SizeInBytes() const;
93 bool Is32Bits() const;
94 bool Is64Bits() const;
95 bool IsValid() const;
96 bool IsValidOrNone() const;
97 bool IsValidRegister() const;
98 bool IsValidFPRegister() const;
99 bool IsNone() const;
100 bool Is(const CPURegister& other) const;
101 bool Aliases(const CPURegister& other) const;
102
103 bool IsZero() const;
104 bool IsSP() const;
105
106 bool IsRegister() const;
107 bool IsFPRegister() const;
108
109 Register X() const;
110 Register W() const;
111 FPRegister D() const;
112 FPRegister S() const;
113
114 bool IsSameSizeAndType(const CPURegister& other) const;
115
116 // V8 compatibility.
117 bool is(const CPURegister& other) const { return Is(other); }
118 bool is_valid() const { return IsValid(); }
119
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000120 int reg_code;
121 int reg_size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000122 RegisterType reg_type;
123};
124
125
126struct Register : public CPURegister {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000127 static Register Create(int code, int size) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000128 return Register(CPURegister::Create(code, size, CPURegister::kRegister));
129 }
130
131 Register() {
132 reg_code = 0;
133 reg_size = 0;
134 reg_type = CPURegister::kNoRegister;
135 }
136
137 explicit Register(const CPURegister& r) {
138 reg_code = r.reg_code;
139 reg_size = r.reg_size;
140 reg_type = r.reg_type;
141 DCHECK(IsValidOrNone());
142 }
143
144 Register(const Register& r) { // NOLINT(runtime/explicit)
145 reg_code = r.reg_code;
146 reg_size = r.reg_size;
147 reg_type = r.reg_type;
148 DCHECK(IsValidOrNone());
149 }
150
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000151 const char* ToString();
152 bool IsAllocatable() const;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000153 bool IsValid() const {
154 DCHECK(IsRegister() || IsNone());
155 return IsValidRegister();
156 }
157
158 static Register XRegFromCode(unsigned code);
159 static Register WRegFromCode(unsigned code);
160
161 // Start of V8 compatibility section ---------------------
162 // These memebers are necessary for compilation.
163 // A few of them may be unused for now.
164
165 static const int kNumRegisters = kNumberOfRegisters;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000166 STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000167 static int NumRegisters() { return kNumRegisters; }
168
169 // We allow crankshaft to use the following registers:
170 // - x0 to x15
171 // - x18 to x24
172 // - x27 (also context)
173 //
174 // TODO(all): Register x25 is currently free and could be available for
175 // crankshaft, but we don't use it as we might use it as a per function
176 // literal pool pointer in the future.
177 //
178 // TODO(all): Consider storing cp in x25 to have only two ranges.
179 // We split allocatable registers in three ranges called
180 // - "low range"
181 // - "high range"
182 // - "context"
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000183
184 static Register from_code(int code) {
185 // Always return an X register.
186 return Register::Create(code, kXRegSizeInBits);
187 }
188
189 // End of V8 compatibility section -----------------------
190};
191
192
193struct FPRegister : public CPURegister {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000194 enum Code {
195#define REGISTER_CODE(R) kCode_##R,
196 DOUBLE_REGISTERS(REGISTER_CODE)
197#undef REGISTER_CODE
198 kAfterLast,
199 kCode_no_reg = -1
200 };
201
202 static FPRegister Create(int code, int size) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000203 return FPRegister(
204 CPURegister::Create(code, size, CPURegister::kFPRegister));
205 }
206
207 FPRegister() {
208 reg_code = 0;
209 reg_size = 0;
210 reg_type = CPURegister::kNoRegister;
211 }
212
213 explicit FPRegister(const CPURegister& r) {
214 reg_code = r.reg_code;
215 reg_size = r.reg_size;
216 reg_type = r.reg_type;
217 DCHECK(IsValidOrNone());
218 }
219
220 FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
221 reg_code = r.reg_code;
222 reg_size = r.reg_size;
223 reg_type = r.reg_type;
224 DCHECK(IsValidOrNone());
225 }
226
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000227 const char* ToString();
228 bool IsAllocatable() const;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000229 bool IsValid() const {
230 DCHECK(IsFPRegister() || IsNone());
231 return IsValidFPRegister();
232 }
233
234 static FPRegister SRegFromCode(unsigned code);
235 static FPRegister DRegFromCode(unsigned code);
236
237 // Start of V8 compatibility section ---------------------
238 static const int kMaxNumRegisters = kNumberOfFPRegisters;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000239 STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000240
241 // Crankshaft can use all the FP registers except:
242 // - d15 which is used to keep the 0 double value
243 // - d30 which is used in crankshaft as a double scratch register
244 // - d31 which is used in the MacroAssembler as a double scratch register
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000245 static FPRegister from_code(int code) {
246 // Always return a D register.
247 return FPRegister::Create(code, kDRegSizeInBits);
248 }
249 // End of V8 compatibility section -----------------------
250};
251
252
253STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
254STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
255
256
257#if defined(ARM64_DEFINE_REG_STATICS)
258#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
259 const CPURegister init_##register_class##_##name = {code, size, type}; \
260 const register_class& name = *reinterpret_cast<const register_class*>( \
261 &init_##register_class##_##name)
262#define ALIAS_REGISTER(register_class, alias, name) \
263 const register_class& alias = *reinterpret_cast<const register_class*>( \
264 &init_##register_class##_##name)
265#else
266#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
267 extern const register_class& name
268#define ALIAS_REGISTER(register_class, alias, name) \
269 extern const register_class& alias
270#endif // defined(ARM64_DEFINE_REG_STATICS)
271
272// No*Reg is used to indicate an unused argument, or an error case. Note that
273// these all compare equal (using the Is() method). The Register and FPRegister
274// variants are provided for convenience.
275INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
276INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
277INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
278
279// v8 compatibility.
280INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
281
282#define DEFINE_REGISTERS(N) \
283 INITIALIZE_REGISTER(Register, w##N, N, \
284 kWRegSizeInBits, CPURegister::kRegister); \
285 INITIALIZE_REGISTER(Register, x##N, N, \
286 kXRegSizeInBits, CPURegister::kRegister);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000287GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000288#undef DEFINE_REGISTERS
289
290INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
291 CPURegister::kRegister);
292INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
293 CPURegister::kRegister);
294
295#define DEFINE_FPREGISTERS(N) \
296 INITIALIZE_REGISTER(FPRegister, s##N, N, \
297 kSRegSizeInBits, CPURegister::kFPRegister); \
298 INITIALIZE_REGISTER(FPRegister, d##N, N, \
299 kDRegSizeInBits, CPURegister::kFPRegister);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000300GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000301#undef DEFINE_FPREGISTERS
302
303#undef INITIALIZE_REGISTER
304
305// Registers aliases.
306ALIAS_REGISTER(Register, ip0, x16);
307ALIAS_REGISTER(Register, ip1, x17);
308ALIAS_REGISTER(Register, wip0, w16);
309ALIAS_REGISTER(Register, wip1, w17);
310// Root register.
311ALIAS_REGISTER(Register, root, x26);
312ALIAS_REGISTER(Register, rr, x26);
313// Context pointer register.
314ALIAS_REGISTER(Register, cp, x27);
315// We use a register as a JS stack pointer to overcome the restriction on the
316// architectural SP alignment.
317// We chose x28 because it is contiguous with the other specific purpose
318// registers.
319STATIC_ASSERT(kJSSPCode == 28);
320ALIAS_REGISTER(Register, jssp, x28);
321ALIAS_REGISTER(Register, wjssp, w28);
322ALIAS_REGISTER(Register, fp, x29);
323ALIAS_REGISTER(Register, lr, x30);
324ALIAS_REGISTER(Register, xzr, x31);
325ALIAS_REGISTER(Register, wzr, w31);
326
327// Keeps the 0 double value.
328ALIAS_REGISTER(FPRegister, fp_zero, d15);
329// Crankshaft double scratch register.
330ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
331// MacroAssembler double scratch registers.
332ALIAS_REGISTER(FPRegister, fp_scratch, d30);
333ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
334ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
335
336#undef ALIAS_REGISTER
337
338
339Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
340 Register reg2 = NoReg,
341 Register reg3 = NoReg,
342 Register reg4 = NoReg);
343
344
345// AreAliased returns true if any of the named registers overlap. Arguments set
346// to NoReg are ignored. The system stack pointer may be specified.
347bool AreAliased(const CPURegister& reg1,
348 const CPURegister& reg2,
349 const CPURegister& reg3 = NoReg,
350 const CPURegister& reg4 = NoReg,
351 const CPURegister& reg5 = NoReg,
352 const CPURegister& reg6 = NoReg,
353 const CPURegister& reg7 = NoReg,
354 const CPURegister& reg8 = NoReg);
355
356// AreSameSizeAndType returns true if all of the specified registers have the
357// same size, and are of the same type. The system stack pointer may be
358// specified. Arguments set to NoReg are ignored, as are any subsequent
359// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
360bool AreSameSizeAndType(const CPURegister& reg1,
361 const CPURegister& reg2,
362 const CPURegister& reg3 = NoCPUReg,
363 const CPURegister& reg4 = NoCPUReg,
364 const CPURegister& reg5 = NoCPUReg,
365 const CPURegister& reg6 = NoCPUReg,
366 const CPURegister& reg7 = NoCPUReg,
367 const CPURegister& reg8 = NoCPUReg);
368
369
370typedef FPRegister DoubleRegister;
371
Ben Murdoch097c5b22016-05-18 11:27:45 +0100372// TODO(arm64) Define SIMD registers.
373typedef FPRegister Simd128Register;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000374
375// -----------------------------------------------------------------------------
376// Lists of registers.
377class CPURegList {
378 public:
379 explicit CPURegList(CPURegister reg1,
380 CPURegister reg2 = NoCPUReg,
381 CPURegister reg3 = NoCPUReg,
382 CPURegister reg4 = NoCPUReg)
383 : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
384 size_(reg1.SizeInBits()), type_(reg1.type()) {
385 DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
386 DCHECK(IsValid());
387 }
388
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000389 CPURegList(CPURegister::RegisterType type, int size, RegList list)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000390 : list_(list), size_(size), type_(type) {
391 DCHECK(IsValid());
392 }
393
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000394 CPURegList(CPURegister::RegisterType type, int size, int first_reg,
395 int last_reg)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000396 : size_(size), type_(type) {
397 DCHECK(((type == CPURegister::kRegister) &&
398 (last_reg < kNumberOfRegisters)) ||
399 ((type == CPURegister::kFPRegister) &&
400 (last_reg < kNumberOfFPRegisters)));
401 DCHECK(last_reg >= first_reg);
402 list_ = (1UL << (last_reg + 1)) - 1;
403 list_ &= ~((1UL << first_reg) - 1);
404 DCHECK(IsValid());
405 }
406
407 CPURegister::RegisterType type() const {
408 DCHECK(IsValid());
409 return type_;
410 }
411
412 RegList list() const {
413 DCHECK(IsValid());
414 return list_;
415 }
416
417 inline void set_list(RegList new_list) {
418 DCHECK(IsValid());
419 list_ = new_list;
420 }
421
422 // Combine another CPURegList into this one. Registers that already exist in
423 // this list are left unchanged. The type and size of the registers in the
424 // 'other' list must match those in this list.
425 void Combine(const CPURegList& other);
426
427 // Remove every register in the other CPURegList from this one. Registers that
428 // do not exist in this list are ignored. The type of the registers in the
429 // 'other' list must match those in this list.
430 void Remove(const CPURegList& other);
431
432 // Variants of Combine and Remove which take CPURegisters.
433 void Combine(const CPURegister& other);
434 void Remove(const CPURegister& other1,
435 const CPURegister& other2 = NoCPUReg,
436 const CPURegister& other3 = NoCPUReg,
437 const CPURegister& other4 = NoCPUReg);
438
439 // Variants of Combine and Remove which take a single register by its code;
440 // the type and size of the register is inferred from this list.
441 void Combine(int code);
442 void Remove(int code);
443
444 // Remove all callee-saved registers from the list. This can be useful when
445 // preparing registers for an AAPCS64 function call, for example.
446 void RemoveCalleeSaved();
447
448 CPURegister PopLowestIndex();
449 CPURegister PopHighestIndex();
450
451 // AAPCS64 callee-saved registers.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000452 static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
453 static CPURegList GetCalleeSavedFP(int size = kDRegSizeInBits);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000454
455 // AAPCS64 caller-saved registers. Note that this includes lr.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000456 static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
457 static CPURegList GetCallerSavedFP(int size = kDRegSizeInBits);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000458
459 // Registers saved as safepoints.
460 static CPURegList GetSafepointSavedRegisters();
461
462 bool IsEmpty() const {
463 DCHECK(IsValid());
464 return list_ == 0;
465 }
466
467 bool IncludesAliasOf(const CPURegister& other1,
468 const CPURegister& other2 = NoCPUReg,
469 const CPURegister& other3 = NoCPUReg,
470 const CPURegister& other4 = NoCPUReg) const {
471 DCHECK(IsValid());
472 RegList list = 0;
473 if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
474 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
475 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
476 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
477 return (list_ & list) != 0;
478 }
479
480 int Count() const {
481 DCHECK(IsValid());
482 return CountSetBits(list_, kRegListSizeInBits);
483 }
484
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000485 int RegisterSizeInBits() const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000486 DCHECK(IsValid());
487 return size_;
488 }
489
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000490 int RegisterSizeInBytes() const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000491 int size_in_bits = RegisterSizeInBits();
492 DCHECK((size_in_bits % kBitsPerByte) == 0);
493 return size_in_bits / kBitsPerByte;
494 }
495
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000496 int TotalSizeInBytes() const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000497 DCHECK(IsValid());
498 return RegisterSizeInBytes() * Count();
499 }
500
501 private:
502 RegList list_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000503 int size_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000504 CPURegister::RegisterType type_;
505
506 bool IsValid() const {
507 const RegList kValidRegisters = 0x8000000ffffffff;
508 const RegList kValidFPRegisters = 0x0000000ffffffff;
509 switch (type_) {
510 case CPURegister::kRegister:
511 return (list_ & kValidRegisters) == list_;
512 case CPURegister::kFPRegister:
513 return (list_ & kValidFPRegisters) == list_;
514 case CPURegister::kNoRegister:
515 return list_ == 0;
516 default:
517 UNREACHABLE();
518 return false;
519 }
520 }
521};
522
523
524// AAPCS64 callee-saved registers.
525#define kCalleeSaved CPURegList::GetCalleeSaved()
526#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
527
528
529// AAPCS64 caller-saved registers. Note that this includes lr.
530#define kCallerSaved CPURegList::GetCallerSaved()
531#define kCallerSavedFP CPURegList::GetCallerSavedFP()
532
533// -----------------------------------------------------------------------------
534// Immediates.
535class Immediate {
536 public:
537 template<typename T>
538 inline explicit Immediate(Handle<T> handle);
539
540 // This is allowed to be an implicit constructor because Immediate is
541 // a wrapper class that doesn't normally perform any type conversion.
542 template<typename T>
543 inline Immediate(T value); // NOLINT(runtime/explicit)
544
545 template<typename T>
546 inline Immediate(T value, RelocInfo::Mode rmode);
547
548 int64_t value() const { return value_; }
549 RelocInfo::Mode rmode() const { return rmode_; }
550
551 private:
552 void InitializeHandle(Handle<Object> value);
553
554 int64_t value_;
555 RelocInfo::Mode rmode_;
556};
557
558
559// -----------------------------------------------------------------------------
560// Operands.
561const int kSmiShift = kSmiTagSize + kSmiShiftSize;
562const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
563
564// Represents an operand in a machine instruction.
565class Operand {
566 // TODO(all): If necessary, study more in details which methods
567 // TODO(all): should be inlined or not.
568 public:
569 // rm, {<shift> {#<shift_amount>}}
570 // where <shift> is one of {LSL, LSR, ASR, ROR}.
571 // <shift_amount> is uint6_t.
572 // This is allowed to be an implicit constructor because Operand is
573 // a wrapper class that doesn't normally perform any type conversion.
574 inline Operand(Register reg,
575 Shift shift = LSL,
576 unsigned shift_amount = 0); // NOLINT(runtime/explicit)
577
578 // rm, <extend> {#<shift_amount>}
579 // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
580 // <shift_amount> is uint2_t.
581 inline Operand(Register reg,
582 Extend extend,
583 unsigned shift_amount = 0);
584
585 template<typename T>
586 inline explicit Operand(Handle<T> handle);
587
588 // Implicit constructor for all int types, ExternalReference, and Smi.
589 template<typename T>
590 inline Operand(T t); // NOLINT(runtime/explicit)
591
592 // Implicit constructor for int types.
593 template<typename T>
594 inline Operand(T t, RelocInfo::Mode rmode);
595
596 inline bool IsImmediate() const;
597 inline bool IsShiftedRegister() const;
598 inline bool IsExtendedRegister() const;
599 inline bool IsZero() const;
600
601 // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
602 // which helps in the encoding of instructions that use the stack pointer.
603 inline Operand ToExtendedRegister() const;
604
605 inline Immediate immediate() const;
606 inline int64_t ImmediateValue() const;
607 inline Register reg() const;
608 inline Shift shift() const;
609 inline Extend extend() const;
610 inline unsigned shift_amount() const;
611
612 // Relocation information.
613 bool NeedsRelocation(const Assembler* assembler) const;
614
615 // Helpers
616 inline static Operand UntagSmi(Register smi);
617 inline static Operand UntagSmiAndScale(Register smi, int scale);
618
619 private:
620 Immediate immediate_;
621 Register reg_;
622 Shift shift_;
623 Extend extend_;
624 unsigned shift_amount_;
625};
626
627
628// MemOperand represents a memory operand in a load or store instruction.
629class MemOperand {
630 public:
631 inline MemOperand();
632 inline explicit MemOperand(Register base,
633 int64_t offset = 0,
634 AddrMode addrmode = Offset);
635 inline explicit MemOperand(Register base,
636 Register regoffset,
637 Shift shift = LSL,
638 unsigned shift_amount = 0);
639 inline explicit MemOperand(Register base,
640 Register regoffset,
641 Extend extend,
642 unsigned shift_amount = 0);
643 inline explicit MemOperand(Register base,
644 const Operand& offset,
645 AddrMode addrmode = Offset);
646
647 const Register& base() const { return base_; }
648 const Register& regoffset() const { return regoffset_; }
649 int64_t offset() const { return offset_; }
650 AddrMode addrmode() const { return addrmode_; }
651 Shift shift() const { return shift_; }
652 Extend extend() const { return extend_; }
653 unsigned shift_amount() const { return shift_amount_; }
654 inline bool IsImmediateOffset() const;
655 inline bool IsRegisterOffset() const;
656 inline bool IsPreIndex() const;
657 inline bool IsPostIndex() const;
658
659 // For offset modes, return the offset as an Operand. This helper cannot
660 // handle indexed modes.
661 inline Operand OffsetAsOperand() const;
662
663 enum PairResult {
664 kNotPair, // Can't use a pair instruction.
665 kPairAB, // Can use a pair instruction (operandA has lower address).
666 kPairBA // Can use a pair instruction (operandB has lower address).
667 };
668 // Check if two MemOperand are consistent for stp/ldp use.
669 static PairResult AreConsistentForPair(const MemOperand& operandA,
670 const MemOperand& operandB,
671 int access_size_log2 = kXRegSizeLog2);
672
673 private:
674 Register base_;
675 Register regoffset_;
676 int64_t offset_;
677 AddrMode addrmode_;
678 Shift shift_;
679 Extend extend_;
680 unsigned shift_amount_;
681};
682
683
684class ConstPool {
685 public:
686 explicit ConstPool(Assembler* assm)
687 : assm_(assm),
688 first_use_(-1),
689 shared_entries_count(0) {}
690 void RecordEntry(intptr_t data, RelocInfo::Mode mode);
691 int EntryCount() const {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000692 return shared_entries_count + static_cast<int>(unique_entries_.size());
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000693 }
694 bool IsEmpty() const {
695 return shared_entries_.empty() && unique_entries_.empty();
696 }
697 // Distance in bytes between the current pc and the first instruction
698 // using the pool. If there are no pending entries return kMaxInt.
699 int DistanceToFirstUse();
700 // Offset after which instructions using the pool will be out of range.
701 int MaxPcOffset();
702 // Maximum size the constant pool can be with current entries. It always
703 // includes alignment padding and branch over.
704 int WorstCaseSize();
705 // Size in bytes of the literal pool *if* it is emitted at the current
706 // pc. The size will include the branch over the pool if it was requested.
707 int SizeIfEmittedAtCurrentPc(bool require_jump);
708 // Emit the literal pool at the current pc with a branch over the pool if
709 // requested.
710 void Emit(bool require_jump);
711 // Discard any pending pool entries.
712 void Clear();
713
714 private:
715 bool CanBeShared(RelocInfo::Mode mode);
716 void EmitMarker();
717 void EmitGuard();
718 void EmitEntries();
719
720 Assembler* assm_;
721 // Keep track of the first instruction requiring a constant pool entry
722 // since the previous constant pool was emitted.
723 int first_use_;
724 // values, pc offset(s) of entries which can be shared.
725 std::multimap<uint64_t, int> shared_entries_;
726 // Number of distinct literal in shared entries.
727 int shared_entries_count;
728 // values, pc offset of entries which cannot be shared.
729 std::vector<std::pair<uint64_t, int> > unique_entries_;
730};
731
732
733// -----------------------------------------------------------------------------
734// Assembler.
735
736class Assembler : public AssemblerBase {
737 public:
738 // Create an assembler. Instructions and relocation information are emitted
739 // into a buffer, with the instructions starting from the beginning and the
740 // relocation information starting from the end of the buffer. See CodeDesc
741 // for a detailed comment on the layout (globals.h).
742 //
743 // If the provided buffer is NULL, the assembler allocates and grows its own
744 // buffer, and buffer_size determines the initial buffer size. The buffer is
745 // owned by the assembler and deallocated upon destruction of the assembler.
746 //
747 // If the provided buffer is not NULL, the assembler uses the provided buffer
748 // for code generation and assumes its size to be buffer_size. If the buffer
749 // is too small, a fatal error occurs. No deallocation of the buffer is done
750 // upon destruction of the assembler.
751 Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
752
753 virtual ~Assembler();
754
755 virtual void AbortedCodeGeneration() {
756 constpool_.Clear();
757 }
758
759 // System functions ---------------------------------------------------------
760 // Start generating code from the beginning of the buffer, discarding any code
761 // and data that has already been emitted into the buffer.
762 //
763 // In order to avoid any accidental transfer of state, Reset DCHECKs that the
764 // constant pool is not blocked.
765 void Reset();
766
767 // GetCode emits any pending (non-emitted) code and fills the descriptor
768 // desc. GetCode() is idempotent; it returns the same result if no other
769 // Assembler functions are invoked in between GetCode() calls.
770 //
771 // The descriptor (desc) can be NULL. In that case, the code is finalized as
772 // usual, but the descriptor is not populated.
773 void GetCode(CodeDesc* desc);
774
775 // Insert the smallest number of nop instructions
776 // possible to align the pc offset to a multiple
777 // of m. m must be a power of 2 (>= 4).
778 void Align(int m);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000779 // Insert the smallest number of zero bytes possible to align the pc offset
780 // to a mulitple of m. m must be a power of 2 (>= 2).
781 void DataAlign(int m);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000782
783 inline void Unreachable();
784
785 // Label --------------------------------------------------------------------
786 // Bind a label to the current pc. Note that labels can only be bound once,
787 // and if labels are linked to other instructions, they _must_ be bound
788 // before they go out of scope.
789 void bind(Label* label);
790
791
792 // RelocInfo and pools ------------------------------------------------------
793
794 // Record relocation information for current pc_.
795 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
796
797 // Return the address in the constant pool of the code target address used by
798 // the branch/call instruction at pc.
799 inline static Address target_pointer_address_at(Address pc);
800
801 // Read/Modify the code target address in the branch/call instruction at pc.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000802 inline static Address target_address_at(Address pc, Address constant_pool);
803 inline static void set_target_address_at(
804 Isolate* isolate, Address pc, Address constant_pool, Address target,
805 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000806 static inline Address target_address_at(Address pc, Code* code);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000807 static inline void set_target_address_at(
808 Isolate* isolate, Address pc, Code* code, Address target,
809 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000810
811 // Return the code target address at a call site from the return address of
812 // that call in the instruction stream.
813 inline static Address target_address_from_return_address(Address pc);
814
815 // Given the address of the beginning of a call, return the address in the
816 // instruction stream that call will return from.
817 inline static Address return_address_from_call_start(Address pc);
818
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000819 // This sets the branch destination (which is in the constant pool on ARM).
820 // This is for calls and branches within generated code.
821 inline static void deserialization_set_special_target_at(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000822 Isolate* isolate, Address constant_pool_entry, Code* code,
823 Address target);
824
825 // This sets the internal reference at the pc.
826 inline static void deserialization_set_target_internal_reference_at(
827 Isolate* isolate, Address pc, Address target,
828 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000829
830 // All addresses in the constant pool are the same size as pointers.
831 static const int kSpecialTargetSize = kPointerSize;
832
833 // The sizes of the call sequences emitted by MacroAssembler::Call.
834 // Wherever possible, use MacroAssembler::CallSize instead of these constants,
835 // as it will choose the correct value for a given relocation mode.
836 //
837 // Without relocation:
838 // movz temp, #(target & 0x000000000000ffff)
839 // movk temp, #(target & 0x00000000ffff0000)
840 // movk temp, #(target & 0x0000ffff00000000)
841 // blr temp
842 //
843 // With relocation:
844 // ldr temp, =target
845 // blr temp
846 static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
847 static const int kCallSizeWithRelocation = 2 * kInstructionSize;
848
  // Size of the generated code in bytes, i.e. the distance from the start of
  // the code buffer to the current assembly position.
  uint64_t SizeOfGeneratedCode() const {
    // pc_ must lie within the code buffer owned by this assembler.
    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;
  }
854
  // Return the code size generated from label to the current position.
  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
    // The label must already be bound to a position at or before the current
    // assembly point.
    DCHECK(label->is_bound());
    DCHECK(pc_offset() >= label->pos());
    DCHECK(pc_offset() < buffer_size_);
    return pc_offset() - label->pos();
  }
862
  // Check the size of the code generated since the given label. This function
  // is used primarily to work around comparisons between signed and unsigned
  // quantities, since V8 uses both.
  // TODO(jbramley): Work out what sign to use for these things and if possible,
  // change things to be consistent.
  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
    DCHECK(size >= 0);
    // The cast is safe: size has just been checked to be non-negative.
    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
  }
872
  // Return the number of instructions generated from label to the
  // current position. Instructions have a fixed size of kInstructionSize.
  uint64_t InstructionsGeneratedSince(const Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
  }
878
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000879 static const int kPatchDebugBreakSlotAddressOffset = 0;
880
881 // Number of instructions necessary to be able to later patch it to a call.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000882 static const int kDebugBreakSlotInstructions = 5;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000883 static const int kDebugBreakSlotLength =
884 kDebugBreakSlotInstructions * kInstructionSize;
885
  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
889 void StartBlockConstPool();
890
  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
893 void EndBlockConstPool();
894
895 bool is_const_pool_blocked() const;
896 static bool IsConstantPoolAt(Instruction* instr);
897 static int ConstantPoolSizeAt(Instruction* instr);
898 // See Assembler::CheckConstPool for more info.
899 void EmitPoolGuard();
900
  // Prevent veneer pool emission until EndBlockVeneerPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockVeneerPool.
904 void StartBlockVeneerPool();
905
  // Resume veneer pool emission. Needs to be called as many times as
  // StartBlockVeneerPool to have an effect.
908 void EndBlockVeneerPool();
909
  // Returns true while veneer pool emission is blocked, i.e. inside at least
  // one StartBlockVeneerPool/EndBlockVeneerPool pair.
  bool is_veneer_pool_blocked() const {
    return veneer_pool_blocked_nesting_ > 0;
  }
913
  // Block emission of both the constant pool and the veneer pool. Calls may
  // be nested; resume with EndBlockPools.
  void StartBlockPools() {
    StartBlockConstPool();
    StartBlockVeneerPool();
  }
  // Resume emission of the constant pool and the veneer pool, undoing one
  // level of StartBlockPools nesting.
  void EndBlockPools() {
    EndBlockConstPool();
    EndBlockVeneerPool();
  }
923
924 // Debugging ----------------------------------------------------------------
925 PositionsRecorder* positions_recorder() { return &positions_recorder_; }
926 void RecordComment(const char* msg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000927
928 // Record a deoptimization reason that can be used by a log or cpu profiler.
929 // Use --trace-deopt to enable.
Ben Murdoch097c5b22016-05-18 11:27:45 +0100930 void RecordDeoptReason(const int reason, int raw_position);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000931
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000932 int buffer_space() const;
933
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000934 // Mark generator continuation.
935 void RecordGeneratorContinuation();
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000936
937 // Mark address of a debug break slot.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000938 void RecordDebugBreakSlot(RelocInfo::Mode mode);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000939
940 // Record the emission of a constant pool.
941 //
942 // The emission of constant and veneer pools depends on the size of the code
943 // generated and the number of RelocInfo recorded.
944 // The Debug mechanism needs to map code offsets between two versions of a
945 // function, compiled with and without debugger support (see for example
946 // Debug::PrepareForBreakPoints()).
947 // Compiling functions with debugger support generates additional code
948 // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
949 // and cause the version of the code with debugger support to have pools
950 // generated in different places.
951 // Recording the position and size of emitted pools allows to correctly
952 // compute the offset mappings between the different versions of a function in
953 // all situations.
954 //
955 // The parameter indicates the size of the pool (in bytes), including
956 // the marker and branch over the data.
957 void RecordConstPool(int size);
958
959
960 // Instruction set functions ------------------------------------------------
961
962 // Branch / Jump instructions.
  // For branches, offsets are scaled, i.e. they are in instructions, not bytes.
964 // Branch to register.
965 void br(const Register& xn);
966
967 // Branch-link to register.
968 void blr(const Register& xn);
969
970 // Branch to register with return hint.
971 void ret(const Register& xn = lr);
972
973 // Unconditional branch to label.
974 void b(Label* label);
975
976 // Conditional branch to label.
977 void b(Label* label, Condition cond);
978
979 // Unconditional branch to PC offset.
980 void b(int imm26);
981
982 // Conditional branch to PC offset.
983 void b(int imm19, Condition cond);
984
985 // Branch-link to label / pc offset.
986 void bl(Label* label);
987 void bl(int imm26);
988
989 // Compare and branch to label / pc offset if zero.
990 void cbz(const Register& rt, Label* label);
991 void cbz(const Register& rt, int imm19);
992
993 // Compare and branch to label / pc offset if not zero.
994 void cbnz(const Register& rt, Label* label);
995 void cbnz(const Register& rt, int imm19);
996
997 // Test bit and branch to label / pc offset if zero.
998 void tbz(const Register& rt, unsigned bit_pos, Label* label);
999 void tbz(const Register& rt, unsigned bit_pos, int imm14);
1000
1001 // Test bit and branch to label / pc offset if not zero.
1002 void tbnz(const Register& rt, unsigned bit_pos, Label* label);
1003 void tbnz(const Register& rt, unsigned bit_pos, int imm14);
1004
1005 // Address calculation instructions.
1006 // Calculate a PC-relative address. Unlike for branches the offset in adr is
1007 // unscaled (i.e. the result can be unaligned).
1008 void adr(const Register& rd, Label* label);
1009 void adr(const Register& rd, int imm21);
1010
1011 // Data Processing instructions.
1012 // Add.
1013 void add(const Register& rd,
1014 const Register& rn,
1015 const Operand& operand);
1016
1017 // Add and update status flags.
1018 void adds(const Register& rd,
1019 const Register& rn,
1020 const Operand& operand);
1021
1022 // Compare negative.
1023 void cmn(const Register& rn, const Operand& operand);
1024
1025 // Subtract.
1026 void sub(const Register& rd,
1027 const Register& rn,
1028 const Operand& operand);
1029
1030 // Subtract and update status flags.
1031 void subs(const Register& rd,
1032 const Register& rn,
1033 const Operand& operand);
1034
1035 // Compare.
1036 void cmp(const Register& rn, const Operand& operand);
1037
1038 // Negate.
1039 void neg(const Register& rd,
1040 const Operand& operand);
1041
1042 // Negate and update status flags.
1043 void negs(const Register& rd,
1044 const Operand& operand);
1045
1046 // Add with carry bit.
1047 void adc(const Register& rd,
1048 const Register& rn,
1049 const Operand& operand);
1050
1051 // Add with carry bit and update status flags.
1052 void adcs(const Register& rd,
1053 const Register& rn,
1054 const Operand& operand);
1055
1056 // Subtract with carry bit.
1057 void sbc(const Register& rd,
1058 const Register& rn,
1059 const Operand& operand);
1060
1061 // Subtract with carry bit and update status flags.
1062 void sbcs(const Register& rd,
1063 const Register& rn,
1064 const Operand& operand);
1065
1066 // Negate with carry bit.
1067 void ngc(const Register& rd,
1068 const Operand& operand);
1069
1070 // Negate with carry bit and update status flags.
1071 void ngcs(const Register& rd,
1072 const Operand& operand);
1073
1074 // Logical instructions.
1075 // Bitwise and (A & B).
1076 void and_(const Register& rd,
1077 const Register& rn,
1078 const Operand& operand);
1079
1080 // Bitwise and (A & B) and update status flags.
1081 void ands(const Register& rd,
1082 const Register& rn,
1083 const Operand& operand);
1084
1085 // Bit test, and set flags.
1086 void tst(const Register& rn, const Operand& operand);
1087
1088 // Bit clear (A & ~B).
1089 void bic(const Register& rd,
1090 const Register& rn,
1091 const Operand& operand);
1092
1093 // Bit clear (A & ~B) and update status flags.
1094 void bics(const Register& rd,
1095 const Register& rn,
1096 const Operand& operand);
1097
1098 // Bitwise or (A | B).
1099 void orr(const Register& rd, const Register& rn, const Operand& operand);
1100
  // Bitwise or-not (A | ~B).
1102 void orn(const Register& rd, const Register& rn, const Operand& operand);
1103
1104 // Bitwise eor/xor (A ^ B).
1105 void eor(const Register& rd, const Register& rn, const Operand& operand);
1106
  // Bitwise exclusive-nor / xnor (A ^ ~B).
1108 void eon(const Register& rd, const Register& rn, const Operand& operand);
1109
1110 // Logical shift left variable.
1111 void lslv(const Register& rd, const Register& rn, const Register& rm);
1112
1113 // Logical shift right variable.
1114 void lsrv(const Register& rd, const Register& rn, const Register& rm);
1115
1116 // Arithmetic shift right variable.
1117 void asrv(const Register& rd, const Register& rn, const Register& rm);
1118
1119 // Rotate right variable.
1120 void rorv(const Register& rd, const Register& rn, const Register& rm);
1121
1122 // Bitfield instructions.
1123 // Bitfield move.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001124 void bfm(const Register& rd, const Register& rn, int immr, int imms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001125
1126 // Signed bitfield move.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001127 void sbfm(const Register& rd, const Register& rn, int immr, int imms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001128
1129 // Unsigned bitfield move.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001130 void ubfm(const Register& rd, const Register& rn, int immr, int imms);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001131
1132 // Bfm aliases.
  // Bitfield insert: copy the low 'width' bits of rn into rd at bit position
  // 'lsb', leaving the other bits of rd unchanged. Alias of bfm.
  void bfi(const Register& rd, const Register& rn, int lsb, int width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    // immr = (-lsb) mod reg_size; the mask is valid because register sizes
    // are powers of two.
    bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }
1139
  // Bitfield extract and insert low: copy 'width' bits of rn, starting at
  // 'lsb', into the least-significant bits of rd. Alias of bfm.
  void bfxil(const Register& rd, const Register& rn, int lsb, int width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, lsb, lsb + width - 1);
  }
1146
1147 // Sbfm aliases.
  // Arithmetic shift right: shift rn right by 'shift', replicating the sign
  // bit. Alias of sbfm.
  void asr(const Register& rd, const Register& rn, int shift) {
    DCHECK(shift < rd.SizeInBits());
    sbfm(rd, rn, shift, rd.SizeInBits() - 1);
  }
1153
  // Signed bitfield insert in zero: place the low 'width' bits of rn at
  // 'lsb' in rd, sign-extending above the field and zeroing below it.
  void sbfiz(const Register& rd, const Register& rn, int lsb, int width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    // immr = (-lsb) mod reg_size (register sizes are powers of two).
    sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }
1160
  // Signed bitfield extract: extract 'width' bits starting at 'lsb' from rn
  // and sign-extend the result into rd.
  void sbfx(const Register& rd, const Register& rn, int lsb, int width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, lsb, lsb + width - 1);
  }
1167
  // Signed extend byte: sign-extend bits <7:0> of rn into rd.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);
  }

  // Signed extend halfword: sign-extend bits <15:0> of rn into rd.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);
  }

  // Signed extend word: sign-extend bits <31:0> of rn into rd.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);
  }
1182
1183 // Ubfm aliases.
  // Logical shift left. Alias of ubfm with immr = (-shift) mod reg_size.
  void lsl(const Register& rd, const Register& rn, int shift) {
    int reg_size = rd.SizeInBits();
    DCHECK(shift < reg_size);
    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
  }
1190
  // Logical shift right, shifting zeroes into the high bits. Alias of ubfm.
  void lsr(const Register& rd, const Register& rn, int shift) {
    DCHECK(shift < rd.SizeInBits());
    ubfm(rd, rn, shift, rd.SizeInBits() - 1);
  }
1196
  // Unsigned bitfield insert in zero: place the low 'width' bits of rn at
  // 'lsb' in rd, zeroing all other bits.
  void ubfiz(const Register& rd, const Register& rn, int lsb, int width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    // immr = (-lsb) mod reg_size (register sizes are powers of two).
    ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }
1203
  // Unsigned bitfield extract: extract 'width' bits starting at 'lsb' from
  // rn, zero-extended into rd.
  void ubfx(const Register& rd, const Register& rn, int lsb, int width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, lsb, lsb + width - 1);
  }
1210
  // Unsigned extend byte: zero-extend bits <7:0> of rn into rd.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);
  }

  // Unsigned extend halfword: zero-extend bits <15:0> of rn into rd.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);
  }

  // Unsigned extend word: zero-extend bits <31:0> of rn into rd.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);
  }
1225
1226 // Extract.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001227 void extr(const Register& rd, const Register& rn, const Register& rm,
1228 int lsb);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001229
1230 // Conditional select: rd = cond ? rn : rm.
1231 void csel(const Register& rd,
1232 const Register& rn,
1233 const Register& rm,
1234 Condition cond);
1235
1236 // Conditional select increment: rd = cond ? rn : rm + 1.
1237 void csinc(const Register& rd,
1238 const Register& rn,
1239 const Register& rm,
1240 Condition cond);
1241
1242 // Conditional select inversion: rd = cond ? rn : ~rm.
1243 void csinv(const Register& rd,
1244 const Register& rn,
1245 const Register& rm,
1246 Condition cond);
1247
1248 // Conditional select negation: rd = cond ? rn : -rm.
1249 void csneg(const Register& rd,
1250 const Register& rn,
1251 const Register& rm,
1252 Condition cond);
1253
1254 // Conditional set: rd = cond ? 1 : 0.
1255 void cset(const Register& rd, Condition cond);
1256
1257 // Conditional set minus: rd = cond ? -1 : 0.
1258 void csetm(const Register& rd, Condition cond);
1259
1260 // Conditional increment: rd = cond ? rn + 1 : rn.
1261 void cinc(const Register& rd, const Register& rn, Condition cond);
1262
1263 // Conditional invert: rd = cond ? ~rn : rn.
1264 void cinv(const Register& rd, const Register& rn, Condition cond);
1265
1266 // Conditional negate: rd = cond ? -rn : rn.
1267 void cneg(const Register& rd, const Register& rn, Condition cond);
1268
  // Extr aliases.
  // Rotate right by an immediate: extr with both source operands equal.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    extr(rd, rs, rs, shift);
  }
1273
1274 // Conditional comparison.
1275 // Conditional compare negative.
1276 void ccmn(const Register& rn,
1277 const Operand& operand,
1278 StatusFlags nzcv,
1279 Condition cond);
1280
1281 // Conditional compare.
1282 void ccmp(const Register& rn,
1283 const Operand& operand,
1284 StatusFlags nzcv,
1285 Condition cond);
1286
1287 // Multiplication.
1288 // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
1289 void mul(const Register& rd, const Register& rn, const Register& rm);
1290
1291 // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
1292 void madd(const Register& rd,
1293 const Register& rn,
1294 const Register& rm,
1295 const Register& ra);
1296
1297 // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
1298 void mneg(const Register& rd, const Register& rn, const Register& rm);
1299
1300 // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
1301 void msub(const Register& rd,
1302 const Register& rn,
1303 const Register& rm,
1304 const Register& ra);
1305
1306 // 32 x 32 -> 64-bit multiply.
1307 void smull(const Register& rd, const Register& rn, const Register& rm);
1308
1309 // Xd = bits<127:64> of Xn * Xm.
1310 void smulh(const Register& rd, const Register& rn, const Register& rm);
1311
1312 // Signed 32 x 32 -> 64-bit multiply and accumulate.
1313 void smaddl(const Register& rd,
1314 const Register& rn,
1315 const Register& rm,
1316 const Register& ra);
1317
1318 // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
1319 void umaddl(const Register& rd,
1320 const Register& rn,
1321 const Register& rm,
1322 const Register& ra);
1323
1324 // Signed 32 x 32 -> 64-bit multiply and subtract.
1325 void smsubl(const Register& rd,
1326 const Register& rn,
1327 const Register& rm,
1328 const Register& ra);
1329
1330 // Unsigned 32 x 32 -> 64-bit multiply and subtract.
1331 void umsubl(const Register& rd,
1332 const Register& rn,
1333 const Register& rm,
1334 const Register& ra);
1335
1336 // Signed integer divide.
1337 void sdiv(const Register& rd, const Register& rn, const Register& rm);
1338
1339 // Unsigned integer divide.
1340 void udiv(const Register& rd, const Register& rn, const Register& rm);
1341
1342 // Bit count, bit reverse and endian reverse.
1343 void rbit(const Register& rd, const Register& rn);
1344 void rev16(const Register& rd, const Register& rn);
1345 void rev32(const Register& rd, const Register& rn);
1346 void rev(const Register& rd, const Register& rn);
1347 void clz(const Register& rd, const Register& rn);
1348 void cls(const Register& rd, const Register& rn);
1349
1350 // Memory instructions.
1351
1352 // Load integer or FP register.
1353 void ldr(const CPURegister& rt, const MemOperand& src);
1354
1355 // Store integer or FP register.
1356 void str(const CPURegister& rt, const MemOperand& dst);
1357
1358 // Load word with sign extension.
1359 void ldrsw(const Register& rt, const MemOperand& src);
1360
1361 // Load byte.
1362 void ldrb(const Register& rt, const MemOperand& src);
1363
1364 // Store byte.
1365 void strb(const Register& rt, const MemOperand& dst);
1366
1367 // Load byte with sign extension.
1368 void ldrsb(const Register& rt, const MemOperand& src);
1369
1370 // Load half-word.
1371 void ldrh(const Register& rt, const MemOperand& src);
1372
1373 // Store half-word.
1374 void strh(const Register& rt, const MemOperand& dst);
1375
1376 // Load half-word with sign extension.
1377 void ldrsh(const Register& rt, const MemOperand& src);
1378
1379 // Load integer or FP register pair.
1380 void ldp(const CPURegister& rt, const CPURegister& rt2,
1381 const MemOperand& src);
1382
1383 // Store integer or FP register pair.
1384 void stp(const CPURegister& rt, const CPURegister& rt2,
1385 const MemOperand& dst);
1386
1387 // Load word pair with sign extension.
1388 void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
1389
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001390 // Load literal to register from a pc relative address.
1391 void ldr_pcrel(const CPURegister& rt, int imm19);
1392
1393 // Load literal to register.
1394 void ldr(const CPURegister& rt, const Immediate& imm);
1395
1396 // Move instructions. The default shift of -1 indicates that the move
1397 // instruction will calculate an appropriate 16-bit immediate and left shift
1398 // that is equal to the 64-bit immediate argument. If an explicit left shift
1399 // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
1400 //
1401 // For movk, an explicit shift can be used to indicate which half word should
1402 // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
1403 // half word with zero, whereas movk(x0, 0, 48) will overwrite the
1404 // most-significant.
1405
  // Move and keep: overwrite one 16-bit halfword of rd, leaving the rest
  // unchanged (see the block comment above for the meaning of 'shift').
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    // shift == -1 lets MoveWide derive the shift from imm.
    MoveWide(rd, imm, shift, MOVK);
  }
1410
  // Move with non-zero: load rd with the bitwise inverse of the shifted
  // 16-bit immediate.
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    // shift == -1 lets MoveWide derive the shift from imm.
    MoveWide(rd, imm, shift, MOVN);
  }
1415
  // Move with zero: load rd with the shifted 16-bit immediate, zeroing the
  // other bits.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    // shift == -1 lets MoveWide derive the shift from imm.
    MoveWide(rd, imm, shift, MOVZ);
  }
1420
1421 // Misc instructions.
1422 // Monitor debug-mode breakpoint.
1423 void brk(int code);
1424
1425 // Halting debug-mode breakpoint.
1426 void hlt(int code);
1427
1428 // Move register to register.
1429 void mov(const Register& rd, const Register& rn);
1430
1431 // Move NOT(operand) to register.
1432 void mvn(const Register& rd, const Operand& operand);
1433
1434 // System instructions.
1435 // Move to register from system register.
1436 void mrs(const Register& rt, SystemRegister sysreg);
1437
1438 // Move from register to system register.
1439 void msr(SystemRegister sysreg, const Register& rt);
1440
1441 // System hint.
1442 void hint(SystemHint code);
1443
1444 // Data memory barrier
1445 void dmb(BarrierDomain domain, BarrierType type);
1446
1447 // Data synchronization barrier
1448 void dsb(BarrierDomain domain, BarrierType type);
1449
1450 // Instruction synchronization barrier
1451 void isb();
1452
1453 // Alias for system instructions.
1454 void nop() { hint(NOP); }
1455
  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    DEBUG_BREAK_NOP,
    INTERRUPT_CODE_NOP,
    ADR_FAR_NOP,
    FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
    LAST_NOP_MARKER = ADR_FAR_NOP
  };

  // Emit a marker nop: 'mov xn, xn', where n is the marker type. Moving a
  // register onto itself has no effect on the register's value, but the
  // register code is preserved in the encoding so the marker can be detected.
  void nop(NopMarkerTypes n) {
    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
  }
1470
1471 // FP instructions.
1472 // Move immediate to FP register.
1473 void fmov(FPRegister fd, double imm);
1474 void fmov(FPRegister fd, float imm);
1475
1476 // Move FP register to register.
1477 void fmov(Register rd, FPRegister fn);
1478
1479 // Move register to FP register.
1480 void fmov(FPRegister fd, Register rn);
1481
1482 // Move FP register to FP register.
1483 void fmov(FPRegister fd, FPRegister fn);
1484
1485 // FP add.
1486 void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1487
1488 // FP subtract.
1489 void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1490
1491 // FP multiply.
1492 void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1493
1494 // FP fused multiply and add.
1495 void fmadd(const FPRegister& fd,
1496 const FPRegister& fn,
1497 const FPRegister& fm,
1498 const FPRegister& fa);
1499
1500 // FP fused multiply and subtract.
1501 void fmsub(const FPRegister& fd,
1502 const FPRegister& fn,
1503 const FPRegister& fm,
1504 const FPRegister& fa);
1505
1506 // FP fused multiply, add and negate.
1507 void fnmadd(const FPRegister& fd,
1508 const FPRegister& fn,
1509 const FPRegister& fm,
1510 const FPRegister& fa);
1511
1512 // FP fused multiply, subtract and negate.
1513 void fnmsub(const FPRegister& fd,
1514 const FPRegister& fn,
1515 const FPRegister& fm,
1516 const FPRegister& fa);
1517
1518 // FP divide.
1519 void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1520
1521 // FP maximum.
1522 void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1523
1524 // FP minimum.
1525 void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1526
1527 // FP maximum.
1528 void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1529
1530 // FP minimum.
1531 void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1532
1533 // FP absolute.
1534 void fabs(const FPRegister& fd, const FPRegister& fn);
1535
1536 // FP negate.
1537 void fneg(const FPRegister& fd, const FPRegister& fn);
1538
1539 // FP square root.
1540 void fsqrt(const FPRegister& fd, const FPRegister& fn);
1541
1542 // FP round to integer (nearest with ties to away).
1543 void frinta(const FPRegister& fd, const FPRegister& fn);
1544
1545 // FP round to integer (toward minus infinity).
1546 void frintm(const FPRegister& fd, const FPRegister& fn);
1547
1548 // FP round to integer (nearest with ties to even).
1549 void frintn(const FPRegister& fd, const FPRegister& fn);
1550
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001551 // FP round to integer (towards plus infinity).
1552 void frintp(const FPRegister& fd, const FPRegister& fn);
1553
  // FP round to integer (towards zero).
1555 void frintz(const FPRegister& fd, const FPRegister& fn);
1556
1557 // FP compare registers.
1558 void fcmp(const FPRegister& fn, const FPRegister& fm);
1559
1560 // FP compare immediate.
1561 void fcmp(const FPRegister& fn, double value);
1562
1563 // FP conditional compare.
1564 void fccmp(const FPRegister& fn,
1565 const FPRegister& fm,
1566 StatusFlags nzcv,
1567 Condition cond);
1568
1569 // FP conditional select.
1570 void fcsel(const FPRegister& fd,
1571 const FPRegister& fn,
1572 const FPRegister& fm,
1573 Condition cond);
1574
1575 // Common FP Convert function
1576 void FPConvertToInt(const Register& rd,
1577 const FPRegister& fn,
1578 FPIntegerConvertOp op);
1579
1580 // FP convert between single and double precision.
1581 void fcvt(const FPRegister& fd, const FPRegister& fn);
1582
1583 // Convert FP to unsigned integer (nearest with ties to away).
1584 void fcvtau(const Register& rd, const FPRegister& fn);
1585
1586 // Convert FP to signed integer (nearest with ties to away).
1587 void fcvtas(const Register& rd, const FPRegister& fn);
1588
1589 // Convert FP to unsigned integer (round towards -infinity).
1590 void fcvtmu(const Register& rd, const FPRegister& fn);
1591
1592 // Convert FP to signed integer (round towards -infinity).
1593 void fcvtms(const Register& rd, const FPRegister& fn);
1594
1595 // Convert FP to unsigned integer (nearest with ties to even).
1596 void fcvtnu(const Register& rd, const FPRegister& fn);
1597
1598 // Convert FP to signed integer (nearest with ties to even).
1599 void fcvtns(const Register& rd, const FPRegister& fn);
1600
1601 // Convert FP to unsigned integer (round towards zero).
1602 void fcvtzu(const Register& rd, const FPRegister& fn);
1603
  // Convert FP to signed integer (round towards zero).
1605 void fcvtzs(const Register& rd, const FPRegister& fn);
1606
1607 // Convert signed integer or fixed point to FP.
1608 void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1609
1610 // Convert unsigned integer or fixed point to FP.
1611 void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1612
  // Instruction functions used only for test, debug, and patching.
  // Emit raw instructions in the instruction stream.
  void dci(Instr raw_inst) { Emit(raw_inst); }

  // Emit 8 bits of data in the instruction stream.
  // NOTE: data emitted this way is not an instruction; callers must ensure it
  // is never executed.
  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }

  // Emit 32 bits of data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }

  // Emit 64 bits of data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
1625
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001626 // Emit an address in the instruction stream.
1627 void dcptr(Label* label);
1628
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001629 // Copy a string into the instruction stream, including the terminating NULL
1630 // character. The instruction pointer (pc_) is then aligned correctly for
1631 // subsequent instructions.
1632 void EmitStringData(const char* string);
1633
1634 // Pseudo-instructions ------------------------------------------------------
1635
1636 // Parameters are described in arm64/instructions-arm64.h.
1637 void debug(const char* message, uint32_t code, Instr params = BREAK);
1638
  // Required by V8. Architecture-independent names for the raw data
  // emitters above.
  void dd(uint32_t data) { dc32(data); }
  void db(uint8_t data) { dc8(data); }
  void dq(uint64_t data) { dc64(data); }
  // Pointers are emitted as 64-bit values on arm64.
  void dp(uintptr_t data) { dc64(data); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001644
1645 // Code generation helpers --------------------------------------------------
1646
  // True if the constant pool currently has no pending entries.
  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }

  // Current assembly position viewed as an Instruction pointer.
  Instruction* pc() const { return Instruction::Cast(pc_); }

  // Return the instruction at the given byte offset from the buffer start.
  Instruction* InstructionAt(ptrdiff_t offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }

  // Inverse of InstructionAt: byte offset of instr from the buffer start.
  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }
1658
  // Register encoding.
  // The following helpers place a register code in the corresponding
  // instruction field. They reject the stack pointer (kSPRegInternalCode);
  // use RdSP/RnSP below for instructions where sp is a valid operand.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }

  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }

  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }

  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }

  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }

  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }
1689
  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    // Masking with kRegCodeMask folds the internal sp code down to its
    // architectural register encoding.
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }

  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }
1701
  // Flags encoding.
  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  inline static Instr ImmPCRelAddress(int imm21);

  // Branch encoding. (Parameter names such as imm26 give the width, in bits,
  // of the immediate field being encoded.)
  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  // Encodability predicates for immediates. IsImmLogical additionally writes
  // the computed field encodings through the n/imm_s/imm_r out-parameters.
  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);
1738
  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  // Load/store offset predicates: return true if the offset is encodable in
  // the corresponding addressing form.
  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
  static bool IsImmLLiteral(int64_t offset);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(int imm);
  inline static Instr ShiftMoveWide(int shift);

  // FP Immediates.
  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);
1766
1767 // Class for scoping postponing the constant pool generation.
1768 class BlockConstPoolScope {
1769 public:
1770 explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1771 assem_->StartBlockConstPool();
1772 }
1773 ~BlockConstPoolScope() {
1774 assem_->EndBlockConstPool();
1775 }
1776
1777 private:
1778 Assembler* assem_;
1779
1780 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
1781 };
1782
  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  // Patching of constant-pool-accessing instructions applies only to embedded
  // constant pools, which this port does not use.
  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001792
  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach the specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
  // Convenience form: tests against the nearest (smallest) reachable limit of
  // all unresolved branches.
  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
  }

  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future for example if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;

  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the code.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  // Delegates to EmitPoolGuard().
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);
1817
1818 class BlockPoolsScope {
1819 public:
1820 explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
1821 assem_->StartBlockPools();
1822 }
1823 ~BlockPoolsScope() {
1824 assem_->EndBlockPools();
1825 }
1826
1827 private:
1828 Assembler* assem_;
1829
1830 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
1831 };
1832
 protected:
  // Select the zero register variant appropriate for |reg| (see the inline
  // definition).
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  // Shared emission path for single-register loads and stores.
  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  // Shared emission path for load/store-pair instructions.
  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);

  // Shared emission paths for logical operations; LogicalImmediate takes the
  // pre-computed n/imm_s/imm_r field encodings.
  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  // Shared emission path for conditional compare instructions.
  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);
1861
  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);

  // Functions for emulating operands not directly supported by the instruction
  // set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);

  // FP immediate encodability predicates.
  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);

  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);

  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used for other branches in the
  // chain if the link chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);
1905
 private:
  // Instruction helpers: low-level emission paths shared by the public
  // mnemonic functions.
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);
1947
  // Label helpers.

  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);

  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);

  void RecordLiteral(int64_t imm, unsigned size);

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Set how far from current pc the next constant pool check will be.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
1972
1973 // Emit the instruction at pc_.
1974 void Emit(Instr instruction) {
1975 STATIC_ASSERT(sizeof(*pc_) == 1);
1976 STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
1977 DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
1978
1979 memcpy(pc_, &instruction, sizeof(instruction));
1980 pc_ += sizeof(instruction);
1981 CheckBuffer();
1982 }
1983
1984 // Emit data inline in the instruction stream.
1985 void EmitData(void const * data, unsigned size) {
1986 DCHECK(sizeof(*pc_) == 1);
1987 DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
1988
1989 // TODO(all): Somehow register we have some data here. Then we can
1990 // disassemble it correctly.
1991 memcpy(pc_, data, size);
1992 pc_ += size;
1993 CheckBuffer();
1994 }
1995
  void GrowBuffer();
  void CheckBufferSpace();
  void CheckBuffer();

  // Pc offset of the next constant pool check.
  int next_constant_pool_check_;

  // Constant pool generation
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance or
  //  * the number of entries in the pool is above a pre-defined size or
  //  * code generation is finished
  // If a pool needs to be emitted before code generation is finished a branch
  // over the emitted pool will be inserted.

  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstPoolInterval = 128;

  // Distance to first use after which a pool will be emitted. Pool entries
  // are accessed with pc relative load therefore this cannot be more than
  // 1 * MB. Since constant pool emission checks are interval based this value
  // is an approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;

  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval based this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;
2029
  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;
  // Internal reference positions, required for (potential) patching in
  // GrowBuffer(); contains only those internal references whose labels
  // are already bound.
  std::deque<int> internal_reference_positions_;

  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are temporarily
  // stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.

  // The pending constant pool.
  ConstPool constpool_;

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;
2060
  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();

 protected:
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information. Any previously recorded id must have been
  // consumed (DCHECKed to be none) before a new one is set.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries, and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;
2079
 public:
  // Descriptor for a forward branch to a not-yet-bound label, as stored in
  // unresolved_branches_.
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };
2090
 protected:
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The second member gives information about the unresolved branch. The first
  // member of the pair is the maximum offset that the branch can reach in the
  // buffer. The map is sorted according to this reachable offset, allowing to
  // easily check when veneers need to be emitted.
  // Note that the maximum reachable offset (first member of the pairs) should
  // always be positive but has the same type as the return value for
  // pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;

  // We generate a veneer for a branch if we reach within this distance of the
  // limit of the range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a finger in the air guess. With a default margin of
  // 1KB, that leaves us an additional 256 instructions to avoid generating a
  // protective branch.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
  // Smallest maximum-reachable-offset over all unresolved branches (the first
  // entry of the sorted map).
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }
  // This is similar to next_constant_pool_check_ and helps reduce the overhead
  // of checking for veneer pools.
  // It is maintained to the closest unresolved branch limit minus the maximum
  // veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;
2124
 private:
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly to
  // the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, and for each PC-relative instruction in the chain checking
  // if pending unresolved information exists. Its complexity is proportional to
  // the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);

 private:
  PositionsRecorder positions_recorder_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
2142};
2143
class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is
  //   size of instructions to patch + kGap
  // Where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
      : Assembler(isolate, reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
      : Assembler(isolate, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the Instruction cache.
    size_t length = buffer_size_ - kGap;
    Assembler::FlushICache(isolate(), buffer_, length);
  }

  // See definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};
2184
2185
2186class EnsureSpace BASE_EMBEDDED {
2187 public:
2188 explicit EnsureSpace(Assembler* assembler) {
2189 assembler->CheckBufferSpace();
2190 }
2191};
2192
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002193} // namespace internal
2194} // namespace v8
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002195
2196#endif // V8_ARM64_ASSEMBLER_ARM64_H_