// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"

// Simulator specific helpers.
#if USE_SIMULATOR
  // TODO(all): If possible, automatically prepend an indicator like
  // UNIMPLEMENTED or LOCATION.
  #define ASM_UNIMPLEMENTED(message) \
  __ Debug(message, __LINE__, NO_PARAM)
  #define ASM_UNIMPLEMENTED_BREAK(message) \
  __ Debug(message, __LINE__, \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
  #define ASM_LOCATION(message) \
  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
  #define ASM_UNIMPLEMENTED(message)
  #define ASM_UNIMPLEMENTED_BREAK(message)
  #define ASM_LOCATION(message)
#endif
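
// A minimal usage sketch (illustrative; not part of the original header).
// Inside code-generation functions, where the __ macro expands to the current
// MacroAssembler, the helpers above are used like this:
//
//   ASM_LOCATION("Builtins::Generate_Adaptor");
//   ASM_UNIMPLEMENTED("slow path not yet ported");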


namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
#define kReturnRegister0 x0
#define kReturnRegister1 x1
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kInterpreterAccumulatorRegister x0
#define kInterpreterRegisterFileRegister x18
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallNewTargetRegister x3
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0

#define LS_MACRO_LIST(V) \
  V(Ldrb, Register&, rt, LDRB_w) \
  V(Strb, Register&, rt, STRB_w) \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w) \
  V(Strh, Register&, rt, STRH_w) \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
  V(Str, CPURegister&, rt, StoreOpFor(rt)) \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V) \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)


// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);


// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
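
// For example (an illustrative note, not from the original header):
// InvertBranchType(integer_eq) yields integer_ne via NegateCondition, while
// InvertBranchType(reg_zero) yields reg_not_zero (cbz <-> cbnz) via the XOR
// with 1 that the STATIC_ASSERT in MacroAssembler below relies on.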

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
                 CodeObjectRequired create_code_object);

  inline Handle<Object> CodeObject();

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd,
                  const Operand& operand);
  inline void Negs(const Register& rd,
                   const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd,
                  const Operand& operand);
  inline void Ngcs(const Register& rd,
                   const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true on success, updating the contents of dst.
  // Otherwise, returns false.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);

  // Conditional macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
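
  // For example, the Ldrb entry of LS_MACRO_LIST above expands to:
  //
  //   inline void Ldrb(const Register& rt, const MemOperand& addr);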

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  inline void Brk(int code);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    DCHECK(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
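
  // For example (illustrative), Fmov(fd, 1) resolves to this template and
  // emits the same code as Fmov(fd, 1.0).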
  inline void Fmov(Register rd, FPRegister fn);
  inline void Fmsub(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmul(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fneg(const FPRegister& fd, const FPRegister& fn);
  inline void Fnmadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Fnmsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm,
                     const FPRegister& fa);
  inline void Frinta(const FPRegister& fd, const FPRegister& fn);
  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
  inline void Frintn(const FPRegister& fd, const FPRegister& fn);
  inline void Frintp(const FPRegister& fd, const FPRegister& fn);
  inline void Frintz(const FPRegister& fd, const FPRegister& fn);
  inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
  inline void Fsub(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Isb();
  inline void Ldnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& src);
  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Immediate& imm);
  // Helper function for double immediate.
  inline void Ldr(const CPURegister& rt, double imm);
  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Madd(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Mov(const Register& rd, const Register& rm);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Msub(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   const Register& ra);
  inline void Mul(const Register& rd, const Register& rn, const Register& rm);
  inline void Nop() { nop(); }
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Ret(const Register& xn = lr);
  inline void Rev(const Register& rd, const Register& rn);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Sbfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Sbfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Scvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Smaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Smull(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Smulh(const Register& rd,
                    const Register& rn,
                    const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt,
                   const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  inline void Ubfiz(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd,
                   const Register& rn,
                   unsigned lsb,
                   unsigned width);
  inline void Ucvtf(const FPRegister& fd,
                    const Register& rn,
                    unsigned fbits = 0);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Umaddl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra);
  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  // Pseudo-instructions ------------------------------------------------------

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is csp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (csp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  void Push(const Register& src0, const FPRegister& src1);
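
  // A minimal usage sketch (illustrative register choices):
  //
  //   __ Push(x0, x1);  // Equivalent to Push(x0); Push(x1);
  //   ...
  //   __ Pop(x1, x0);   // Pop in reverse order to restore both registers.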

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  inline void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  inline void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kFPRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);
  void PushMultipleTimes(CPURegister src, int count);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }

  // Sometimes callers need to push or pop multiple registers in a way that is
  // difficult to structure efficiently for fixed Push or Pop calls. This scope
  // allows push requests to be queued up, then flushed at once. The
  // MacroAssembler will try to generate the most efficient sequence required.
  //
  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
  // register sizes and types.
  class PushPopQueue {
   public:
    explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }

    ~PushPopQueue() {
      DCHECK(queued_.empty());
    }

    void Queue(const CPURegister& rt) {
      size_ += rt.SizeInBytes();
      queued_.push_back(rt);
    }

    enum PreambleDirective {
      WITH_PREAMBLE,
      SKIP_PREAMBLE
    };
    void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
    void PopQueued();

   private:
    MacroAssembler* masm_;
    int size_;
    std::vector<CPURegister> queued_;
  };
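
  // A minimal usage sketch (illustrative; 'masm' is a MacroAssembler*):
  //
  //   PushPopQueue queue(masm);
  //   queue.Queue(x0);
  //   queue.Queue(d0);     // Mixed register sizes and types are allowed.
  //   queue.PushQueued();  // Flushes the queue as one efficient sequence.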

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then
  // csp must be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // If the current stack pointer (according to StackPointer()) is csp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count,
                    uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count,
                   uint64_t unit_size = kXRegSize);
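
  // For example (illustrative): reserve stack space for three X registers,
  // then release it again.
  //
  //   __ Claim(3);  // Moves the current stack pointer down by 3 * kXRegSize.
  //   ...
  //   __ Drop(3);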

  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
  // register.
  inline void ClaimBySMI(const Register& count_smi,
                         uint64_t unit_size = kXRegSize);
  inline void DropBySMI(const Register& count_smi,
                        uint64_t unit_size = kXRegSize);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs,
                               const Operand& rhs,
                               Condition cond,
                               Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);
  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
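
  // A minimal usage sketch (illustrative; assumes the usual V8 kSmiTagMask):
  //
  //   Label not_smi;
  //   __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);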

  // Insert one or more instructions into the instruction stream that encode
  // some caller-defined data. The instructions used will be executable with no
  // side effects.
  inline void InlineData(uint64_t data);

  // Insert an instrumentation enable marker into the instruction stream.
  inline void EnableInstrumentation();

  // Insert an instrumentation disable marker into the instruction stream.
  inline void DisableInstrumentation();

  // Insert an instrumentation event marker into the instruction stream. These
  // will be picked up by the instrumentation system to annotate an instruction
  // profile. The argument marker_name must be a printable two-character string;
  // it will be encoded in the event marker.
  inline void AnnotateInstrumentation(const char* marker_name);

  // If emit_debug_code() is true, emit a run-time check to ensure that
  // StackPointer() does not point below the system stack pointer.
  //
  // Whilst it is architecturally legal for StackPointer() to point below csp,
  // it can be evidence of a potential bug because the ABI forbids accesses
  // below csp.
  //
  // If StackPointer() is the system stack pointer (csp), then csp will be
  // dereferenced to cause the processor (or simulator) to abort if it is not
  // properly aligned.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertStackConsistency();

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless the current stack pointer (as set by
  // SetStackPointer) is the system stack pointer (csp), and is aligned to
  // ActivationFrameAlignment().
  void PopCalleeSavedRegisters();

  // Set the current stack pointer, but don't generate any code.
  inline void SetStackPointer(const Register& stack_pointer) {
    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }
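
  // For example (illustrative): switch to jssp as the current stack pointer.
  //
  //   masm->SetStackPointer(jssp);
  //   DCHECK(masm->StackPointer().Is(jssp));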

  // Align csp for a frame, as per ActivationFrameAlignment, and make it the
  // current stack pointer.
  inline void AlignAndSetCSPForFrame() {
    int sp_alignment = ActivationFrameAlignment();
    // AAPCS64 mandates at least 16-byte alignment.
    DCHECK(sp_alignment >= 16);
    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
    Bic(csp, StackPointer(), sp_alignment - 1);
    SetStackPointer(csp);
  }

  // Push the system stack pointer (csp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed. The amount pushed will be increased as necessary
  // to ensure csp remains aligned to 16 bytes.
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void BumpSystemStackPointer(const Operand& space);

  // Re-synchronizes the system stack pointer (csp) with the current stack
  // pointer (according to StackPointer()).
  //
  // This method asserts that StackPointer() is not csp, since the call does
  // not make sense in that context.
  inline void SyncSystemStackPointer();

  // Helpers ------------------------------------------------------------------
  // Root register.
  inline void InitializeRootRegister();

  void AssertFPCRState(Register fpcr = NoReg);
  void ConfigureFPCR();
  void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
  void CanonicalizeNaN(const FPRegister& reg) {
    CanonicalizeNaN(reg, reg);
  }

  // Load an object from the root table.
  void LoadRoot(CPURegister destination,
                Heap::RootListIndex index);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index);

  // Load both TrueValue and FalseValue roots.
  void LoadTrueFalseRoots(Register true_root, Register false_root);

  void LoadHeapObject(Register dst, Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      DCHECK(object->IsSmi());
      Mov(result, Operand(object));
    }
  }

  static int SafepointRegisterStackIndex(int reg_code);
  // This is required for compatibility with architecture-independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }
  inline void Move(Register dst, Smi* src) { Mov(dst, src); }

  void LoadInstanceDescriptors(Register map,
                               Register descriptors);
  void EnumLengthUntagged(Register dst, Register map);
  void EnumLengthSmi(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template<typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
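
  // For example (illustrative, assuming a BitField-style class such as
  // Map::ElementsKindBits): DecodeField<Map::ElementsKindBits>(x0) extracts
  // the field in place, using Field::kShift and the set-bit count of
  // Field::kMask as the Ubfx operands.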

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);
  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register smi);
  inline void SmiUntagToDouble(FPRegister dst,
                               Register src,
                               UntagMode mode = kNotSpeculativeUntag);
  inline void SmiUntagToFloat(FPRegister dst,
                              Register src,
                              UntagMode mode = kNotSpeculativeUntag);

  // Tag and push in one step.
  inline void SmiTagAndPush(Register src);
  inline void SmiTagAndPush(Register src1, Register src2);
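
  // Illustrative note (an assumption about the arm64 smi layout, where
  // kSmiShift is 32): SmiTag amounts to Lsl(dst, src, kSmiShift), and
  // SmiUntag to the matching arithmetic shift right.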

  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
  void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);

  inline void ObjectTag(Register tagged_obj, Register obj);
  inline void ObjectUntag(Register untagged_obj, Register obj);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

  void JumpIfHeapNumber(Register object, Label* on_heap_number,
                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);

  // Sets the vs flag if the input is -0.0.
  void TestForMinusZero(DoubleRegister input);

  // Jump to label if the input double register contains -0.0.
  void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);

  // Jump to label if the input integer register contains the double precision
  // floating point representation of -0.0.
  void JumpIfMinusZero(Register input, Label* on_negative_zero);

  // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
  // output.
  void ClampInt32ToUint8(Register in_out);
  void ClampInt32ToUint8(Register output, Register input);

  // Saturate a double in input to an unsigned 8-bit integer in output.
  void ClampDoubleToUint8(Register output,
                          DoubleRegister input,
                          DoubleRegister dbl_scratch);

  // Try to represent a double as a signed 32-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt32(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is32Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }

  // Try to represent a double as a signed 64-bit int.
  // This succeeds if the result compares equal to the input, so inputs of -0.0
  // are represented as 0 and handled as a success.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt64(Register as_int,
                                 FPRegister value,
                                 FPRegister scratch_d,
                                 Label* on_successful_conversion = NULL,
                                 Label* on_failed_conversion = NULL) {
    DCHECK(as_int.Is64Bits());
    TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                            on_failed_conversion);
  }
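
  // A minimal usage sketch (illustrative):
  //
  //   Label failed;
  //   __ TryRepresentDoubleAsInt32(w0, d0, d1, NULL, &failed);
  //   // Falls through here with w0 holding the int32 result on success.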

  // ---- Object Utilities ----

  // Initialize fields with filler values. Fields starting at |current_address|
  // up to, but not including, |end_address| are overwritten with the value in
  // |filler|. At the end of the loop, |current_address| takes the value of
  // |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // Copies a number of bytes from src to dst. All passed registers are
  // clobbered. On exit src and dst will point to the place just after where the
  // last byte was read or written and length will be zero. Hint may be used to
  // determine which is the most efficient algorithm to use for copying.
  void CopyBytes(Register dst,
                 Register src,
                 Register length,
                 Register scratch,
                 CopyHint hint = kCopyUnknown);

  // ---- String Utilities ----


  // Jump to label if either object is not a sequential one-byte string.
  // Optionally perform a smi check on the objects first.
  void JumpIfEitherIsNotSequentialOneByteStrings(
      Register first, Register second, Register scratch1, Register scratch2,
      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);

  // ---- Calling / Jumping helpers ----

  // This is required for compatibility in architecture-independent code.
  inline void jmp(Label* L) { B(L); }

  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
  void TailCallStub(CodeStub* stub);

  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }
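
  // For example (illustrative):
  //
  //   __ CallRuntime(Runtime::kStackGuard);  // nargs comes from the function.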

  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  void TailCallRuntime(Runtime::FunctionId fid);

  int ActivationFrameAlignment();

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);


  // Invoke specified builtin JavaScript function.
  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  void Call(Register target);
  void Call(Label* target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, there is a matching CallSize function that returns
  // the size (in bytes) of the call sequence.
  static int CallSize(Register target);
  static int CallSize(Label* target);
  static int CallSize(Address target, RelocInfo::Mode rmode);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      InvokeFlag flag,
                      bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      Register new_target,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);


  // ---- Floating point helpers ----

  // Perform a conversion from a double to a signed int64. If the input fits in
  // range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result,
                               DoubleRegister input,
                               Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object,
                         Register result,
                         Register heap_number_map,
                         Label* not_int32);

  // ---- Code generation helpers ----

  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() const { return generating_stub_; }
#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
  bool use_real_aborts() const { return use_real_aborts_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }
  bool AllowThisStubCall(CodeStub* stub);

  class NoUseRealAbortsScope {
   public:
    explicit NoUseRealAbortsScope(MacroAssembler* masm) :
        saved_(masm->use_real_aborts_), masm_(masm) {
      masm_->use_real_aborts_ = false;
    }
    ~NoUseRealAbortsScope() {
      masm_->use_real_aborts_ = saved_;
    }
   private:
    bool saved_;
    MacroAssembler* masm_;
  };

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. The allocated object is returned in result.
  //
  // If the new space is exhausted, control continues at the gc_required label.
  // In this case, the result and scratch registers may still be clobbered.
  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001296
1297 void Allocate(int object_size,
1298 Register result,
1299 Register scratch1,
1300 Register scratch2,
1301 Label* gc_required,
1302 AllocationFlags flags);
1303
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001304 void AllocateTwoByteString(Register result,
1305 Register length,
1306 Register scratch1,
1307 Register scratch2,
1308 Register scratch3,
1309 Label* gc_required);
1310 void AllocateOneByteString(Register result, Register length,
1311 Register scratch1, Register scratch2,
1312 Register scratch3, Label* gc_required);
1313 void AllocateTwoByteConsString(Register result,
1314 Register length,
1315 Register scratch1,
1316 Register scratch2,
1317 Label* gc_required);
1318 void AllocateOneByteConsString(Register result, Register length,
1319 Register scratch1, Register scratch2,
1320 Label* gc_required);
1321 void AllocateTwoByteSlicedString(Register result,
1322 Register length,
1323 Register scratch1,
1324 Register scratch2,
1325 Label* gc_required);
1326 void AllocateOneByteSlicedString(Register result, Register length,
1327 Register scratch1, Register scratch2,
1328 Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed.
  // All registers are clobbered.
  // If no heap_number_map register is provided, the function will take care of
  // loading it.
  void AllocateHeapNumber(Register result,
                          Label* gc_required,
                          Register scratch1,
                          Register scratch2,
                          CPURegister value = NoFPReg,
                          CPURegister heap_number_map = NoReg,
                          MutableMode mode = IMMUTABLE);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not).
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type. This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register). It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object,
                        Register map,
                        Register type_reg,
                        InstanceType type,
                        Label* if_cond_pass,
                        Condition cond = eq);

  void JumpIfNotObjectType(Register object,
                           Register map,
                           Register type_reg,
                           InstanceType type,
                           Label* if_not_object);
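
  // Illustrative usage sketch (not part of the interface): branch to a miss
  // label unless x0 holds a JSFunction; x1 and x2 receive the map and
  // instance type as scratch values. Register choices are arbitrary.
  //   __ JumpIfNotObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &miss);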

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);

  // Compare an object's map with the specified map. Condition flags are set
  // with the result of the map compare.
  void CompareObjectMap(Register obj, Heap::RootListIndex index);

  // Compare an object's map with the specified map. Condition flags are set
  // with the result of the map compare.
  void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map,
                  Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);

  // As above, but the map of the object is already loaded into obj_map, and is
  // preserved.
  void CheckMap(Register obj_map,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of the weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
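
  // Illustrative usage sketch (not part of the interface): check that x0
  // still matches the object held by a caller-provided Handle<WeakCell>,
  // branching to a miss label if the cell was cleared or differs.
  //   __ CmpWeakValue(x0, cell, x1);
  //   __ B(ne, &miss);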

  // Test the bitfield of the heap object map with mask and set the condition
  // flags. The object register is preserved.
  void TestMapBitfield(Register object, uint64_t mask);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Load the value from the root list and push it onto the stack.
  void PushRoot(Heap::RootListIndex index);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj,
                  Heap::RootListIndex index,
                  Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj,
                     Heap::RootListIndex index,
                     Label* if_not_equal);
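
  // Illustrative usage sketch (not part of the interface): dispatch on
  // whether x0 holds the undefined sentinel; the label is assumed to be
  // bound by the surrounding code.
  //   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined);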

  // Load and check the instance type of an object for being a unique name.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Falls through if the object is a unique name and jumps to fail otherwise.
  inline void IsObjectNameType(Register object, Register type, Label* fail);

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // The object and type arguments can be the same register; in that case it
  // will be overwritten with the type.
  // Jumps to not_string or string as appropriate. If the appropriate label is
  // NULL, falls through.
  inline void IsObjectJSStringType(Register object, Register type,
                                   Label* not_string, Label* string = NULL);

  // Compare the contents of a register with an operand, and branch to true,
  // false or fall through, depending on condition.
  void CompareAndSplit(const Register& lhs,
                       const Operand& rhs,
                       Condition cond,
                       Label* if_true,
                       Label* if_false,
                       Label* fall_through);

  // Test the bits of register defined by bit_pattern, and branch to
  // if_any_set, if_all_clear or fall_through accordingly.
  void TestAndSplit(const Register& reg,
                    uint64_t bit_pattern,
                    Label* if_all_clear,
                    Label* if_any_set,
                    Label* fall_through);
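
  // Illustrative usage sketch (not part of the interface): assuming an
  // instance type has been loaded into x1, branch to is_string when
  // (type & kIsNotStringMask) == 0 and to not_string when any masked bit is
  // set. Labels and register choices are the caller's.
  //   __ TestAndSplit(x1, kIsNotStringMask, &is_string, &not_string,
  //                   &fall_through);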

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check to see if number can be stored as a double in FastDoubleElements.
  // If it can, store it at the index specified by key_reg in the array,
  // otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   FPRegister fpscratch1,
                                   Label* fail,
                                   int elements_offset = 0);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash  - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Inline caching support.

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 SeqStringSetCharCheckIndexType index_type,
                                 Register scratch,
                                 uint32_t encoding_mask);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch1,
                              Register scratch2,
                              Label* miss);

  // Hash the integer value in the 'key' register.
  // It uses the same algorithm as ComputeIntegerHash in utils.h.
  void GetNumberHash(Register key, Register scratch);

  // Load value from the dictionary.
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'elements'.
  //            Unchanged on bailout so 'key' or 'elements' can be used
  //            in further computation.
  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register result,
                                Register scratch0,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3);

  // ---------------------------------------------------------------------------
  // Frames.

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  // Returns map with validated enum cache in object register.
  void CheckEnumCache(Register object,
                      Register null_value,
                      Register scratch0,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver should point to the array object.
  // If allocation info is present, the Z flag is set (so that the eq
  // condition will pass).
  void TestJSArrayForAllocationMemento(Register receiver,
                                       Register scratch1,
                                       Register scratch2,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
                                    &no_memento_found);
    B(eq, memento_found);
    Bind(&no_memento_found);
  }

  // The stack pointer has to switch between csp and jssp when setting up and
  // destroying the exit frame. Hence preserving/restoring the registers is
  // slightly more complicated than simple push/pop operations.
  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The stack pointer must be jssp on entry, and will be set to csp by this
  // function. The frame pointer is also configured, but the only other
  // registers modified by this function are the provided scratch register, and
  // jssp.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //          fp[8]: CallerPC (lr)
  //    fp -> fp[0]: CallerFP (old fp)
  //          fp[-8]: SPOffset (new csp)
  //          fp[-16]: CodeObject()
  //          fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
  //          csp[8]: Memory reserved for the caller if extra_space != 0.
  //                  Alignment padding, if necessary.
  //   csp -> csp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles,
                      const Register& scratch,
                      int extra_space = 0);
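
  // Illustrative usage sketch (not part of the interface): bracket a call to
  // C code with an exit frame, preserving doubles and using x10 as the
  // scratch register. The C call itself is elided.
  //   __ EnterExitFrame(true, x10);
  //   ...  // Set up arguments and call the C function.
  //   __ LeaveExitFrame(true, x10, true);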

  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if restore_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  //  * The stack pointer is reset to jssp.
  //
  // The stack pointer must be csp on entry.
  void LeaveExitFrame(bool save_doubles,
                      const Register& scratch,
                      bool restore_context);

  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
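
  // Illustrative usage sketch (not part of the interface): compute
  // w0 = w1 / 3 (truncated), leaving w1 unchanged. The use of W registers
  // here is an assumption based on the int32_t divisor.
  //   __ TruncatingDiv(w0, w1, 3);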

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value, Register scratch1,
                  Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch1,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();

  // Store value in register src in the safepoint stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst) {
    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
  }

  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src) {
    Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
  }

  void CheckPageFlagSet(const Register& object,
                        const Register& scratch,
                        int mask,
                        Label* if_any_set);

  void CheckPageFlagClear(const Register& object,
                          const Register& scratch,
                          int mask,
                          Label* if_all_clear);

  // Check if object is in new space and jump accordingly.
  // Register 'object' is preserved.
  void JumpIfNotInNewSpace(Register object,
                           Label* branch) {
    InNewSpace(object, ne, branch);
  }

  void JumpIfInNewSpace(Register object,
                        Label* branch) {
    InNewSpace(object, eq, branch);
  }

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
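
  // Illustrative usage sketch (not part of the interface): store x1 into a
  // field of the object in x0 and emit the write barrier, with x2 as scratch
  // and the link register not yet saved by the caller.
  //   __ Str(x1, FieldMemOperand(x0, JSObject::kPropertiesOffset));
  //   __ RecordWriteField(x0, JSObject::kPropertiesOffset, x1, x2,
  //                       kLRHasNotBeenSaved, kDontSaveFPRegs);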

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     lr_status,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Checks the color of an object. If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Register scratch4, Label* value_is_white);

  // Helper for finding the mark bits for an address.
  // Note that the behaviour slightly differs from other architectures.
  // On exit:
  //  - addr_reg is unchanged.
  //  - The bitmap register points at the word with the mark bits.
  //  - The shift register contains the index of the first color bit for this
  //    object in the bitmap.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register shift_reg);

  // Check if an object has a given incremental marking color.
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);

  // ---------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason);
  void AssertRegisterIsClear(Register reg, BailoutReason reason);
  void AssertRegisterIsRoot(
      Register reg,
      Heap::RootListIndex index,
      BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
  void AssertFastElements(Register elements);

  // Abort if the specified register contains the invalid color bit pattern.
  // The pattern must be in bits [1:0] of 'reg' register.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertHasValidColor(const Register& reg);

  // Abort if 'object' register doesn't point to a string object.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertIsString(const Register& object);

  // Like Assert(), but always enabled.
  void Check(Condition cond, BailoutReason reason);
  void CheckRegisterIsClear(Register reg, BailoutReason reason);

  // Print a message to stderr and abort execution.
  void Abort(BailoutReason reason);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch1,
      Register scratch2,
      Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers function and
  // map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // At the moment it is only possible to print the value of csp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update csp on every push (using BumpSystemStackPointer), so determining its
  // value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char * format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);
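
  // Illustrative usage sketch (not part of the interface): print an integer
  // register and a floating-point register at run time. PRId64 is the
  // standard <inttypes.h> macro.
  //   __ Mov(x0, 42);
  //   __ Fmov(d1, 1.5);
  //   __ Printf("x0: %" PRId64 ", d1: %f\n", x0, d1);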

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Code ageing support functions.

  // Code ageing on ARM64 works similarly to code ageing on ARM. When V8 wants
  // to mark a function as old, it replaces some of the function prologue
  // (generated by FullCodeGenerator::Generate) with a call to a special stub
  // (ultimately generated by GenerateMakeCodeYoungAgainCommon). The stub
  // restores the function prologue to its initial young state (indicating
  // that it has been recently run) and continues. A young function is
  // therefore one which has a normal frame setup sequence, and an old
  // function has a code age sequence which calls a code ageing stub.

  // Set up a basic stack frame for young code (or code exempt from ageing)
  // with type FUNCTION. It may be patched later for code ageing support. This
  // is done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
  //
  // This function takes an Assembler so it can be called from either a
  // MacroAssembler or a PatchingAssembler context.
  static void EmitFrameSetupForCodeAgePatching(Assembler* assm);

  // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
  void EmitFrameSetupForCodeAgePatching();

  // Emit a code age sequence that calls the relevant code age stub. The code
  // generated by this sequence is expected to replace the code generated by
  // EmitFrameSetupForCodeAgePatching, and represents an old function.
  //
  // If stub is NULL, this function generates the code age sequence but omits
  // the stub address that is normally embedded in the instruction stream. This
  // can be used by debug code to verify code age sequences.
  static void EmitCodeAgeSequence(Assembler* assm, Code* stub);

  // Call EmitCodeAgeSequence from a MacroAssembler context.
  void EmitCodeAgeSequence(Code* stub);

  // Return true if the sequence is a young sequence generated by
  // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
  static bool IsYoungSequence(Isolate* isolate, byte* sequence);

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Perform necessary maintenance operations before a push or after a pop.
  //
  // Note that size is specified in bytes.
  void PushPreamble(Operand total_size);
  void PopPostamble(Operand total_size);

  void PushPreamble(int count, int size) { PushPreamble(count * size); }
  void PopPostamble(int count, int size) { PopPostamble(count * size); }

 private:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack (csp) must be prepared by the caller as for a normal
  // AAPCS64 call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Try to represent a double as an int so that integer fast-paths may be
  // used. Not every valid integer value is guaranteed to be caught.
  // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
  // is a W or X register.
  //
  // This does not distinguish between +0 and -0, so if this distinction is
  // important it must be checked separately.
  //
  // On output the Z flag is set if the operation was successful.
  void TryRepresentDoubleAsInt(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion = NULL,
                               Label* on_failed_conversion = NULL);

  bool generating_stub_;
#if DEBUG
  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;
#endif
  bool has_frame_;

  // The Abort method should call a V8 runtime function, but the CallRuntime
  // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
  // use a simpler abort mechanism that doesn't depend on CEntryStub.
  //
  // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
  // being generated.
  bool use_real_aborts_;

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

 public:
  // Far branches resolving.
  //
  // The various classes of branch instructions with immediate offsets have
  // different ranges. While the Assembler will fail to assemble a branch
  // exceeding its range, the MacroAssembler offers a mechanism to resolve
  // branches to too distant targets, either by tweaking the generated code to
  // use branch instructions with wider ranges or generating veneers.
  //
  // Currently branches to distant targets are resolved using unconditional
  // branch instructions with a range of +-128MB. If that becomes too little
  // (!), the mechanism can be extended to generate special veneers for really
  // far targets.

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label *label,
                                             ImmBranchType branch_type);
};


// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
 public:
  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
      : masm_(masm)
#ifdef DEBUG
        ,
        size_(count * kInstructionSize)
#endif
  {
    // Before blocking the const pool, see if it needs to be emitted.
    masm_->CheckConstPool(false, true);
    masm_->CheckVeneerPool(false, true);

    masm_->StartBlockPools();
#ifdef DEBUG
    if (count != 0) {
      masm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    masm_->EndBlockPools();
#ifdef DEBUG
    if (start_.is_bound()) {
      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }

 private:
  MacroAssembler* masm_;
#ifdef DEBUG
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};
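
// Illustrative usage sketch (not part of the interface): emit exactly two
// instructions with pools blocked. Only raw, lower-case assembler methods may
// be used inside the scope, since macro instructions are disallowed there.
//   {
//     InstructionAccurateScope scope(masm, 2);
//     masm->add(x0, x0, x1);
//     masm->sub(x2, x2, x3);
//   }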


// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK(available_->type() == CPURegister::kRegister);
    DCHECK(availablefp_->type() == CPURegister::kFPRegister);
  }

  ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }

  Register UnsafeAcquire(const Register& reg) {
    return Register(UnsafeAcquire(available_, reg));
  }

  Register AcquireSameSizeAs(const Register& reg);
  FPRegister AcquireSameSizeAs(const FPRegister& reg);

 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);
  static CPURegister UnsafeAcquire(CPURegList* available,
                                   const CPURegister& reg);

  // Available scratch registers.
  CPURegList* available_;    // kRegister
  CPURegList* availablefp_;  // kFPRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kFPRegister
};
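
// Illustrative usage sketch (not part of the interface): borrow an X register
// for a short sequence; it rejoins the MacroAssembler's TmpList() when the
// scope ends.
//   {
//     UseScratchRegisterScope temps(masm);
//     Register scratch = temps.AcquireX();
//     masm->Mov(scratch, 0xdecafbad);
//   }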


inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
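
// Illustrative usage sketch (not part of the interface): load the native
// context and then its global proxy slot into x0, going through the current
// context register cp.
//   masm->Ldr(x0, NativeContextMemOperand());
//   masm->Ldr(x0, ContextMemOperand(x0, Context::GLOBAL_PROXY_INDEX));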


// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  explicit InlineSmiCheckInfo(Address info);

  bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  const Register& SmiRegister() const {
    return reg_;
  }

  Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
  // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);

  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  Instruction* smi_check_;

  // Fields in the data encoded by InlineData.

  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
  // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
  // used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32 - 5> {};
};

}  // namespace internal
}  // namespace v8

#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

#endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_