blob: 953c3fd7f2bf245e24fbe1ed9e0850d726bf9d07 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005#if V8_TARGET_ARCH_ARM64
6
7#include "src/base/bits.h"
8#include "src/base/division-by-constant.h"
9#include "src/bootstrapper.h"
10#include "src/codegen.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000011#include "src/debug/debug.h"
12#include "src/register-configuration.h"
Emily Bernierd0a1eb72015-03-24 16:35:39 -040013#include "src/runtime/runtime.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000014
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000015#include "src/arm64/frames-arm64.h"
16#include "src/arm64/macro-assembler-arm64.h"
17
Ben Murdochb8a8cc12014-11-26 15:28:44 +000018namespace v8 {
19namespace internal {
20
21// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
22#define __
23
24
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000025MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
26 unsigned buffer_size,
27 CodeObjectRequired create_code_object)
Ben Murdochb8a8cc12014-11-26 15:28:44 +000028 : Assembler(arg_isolate, buffer, buffer_size),
29 generating_stub_(false),
30#if DEBUG
31 allow_macro_instructions_(true),
32#endif
33 has_frame_(false),
34 use_real_aborts_(true),
35 sp_(jssp),
36 tmp_list_(DefaultTmpList()),
37 fptmp_list_(DefaultFPTmpList()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000038 if (create_code_object == CodeObjectRequired::kYes) {
39 code_object_ =
40 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +000041 }
42}
43
44
45CPURegList MacroAssembler::DefaultTmpList() {
46 return CPURegList(ip0, ip1);
47}
48
49
50CPURegList MacroAssembler::DefaultFPTmpList() {
51 return CPURegList(fp_scratch1, fp_scratch2);
52}
53
54
55void MacroAssembler::LogicalMacro(const Register& rd,
56 const Register& rn,
57 const Operand& operand,
58 LogicalOp op) {
59 UseScratchRegisterScope temps(this);
60
61 if (operand.NeedsRelocation(this)) {
62 Register temp = temps.AcquireX();
63 Ldr(temp, operand.immediate());
64 Logical(rd, rn, temp, op);
65
66 } else if (operand.IsImmediate()) {
67 int64_t immediate = operand.ImmediateValue();
68 unsigned reg_size = rd.SizeInBits();
69
70 // If the operation is NOT, invert the operation and immediate.
71 if ((op & NOT) == NOT) {
72 op = static_cast<LogicalOp>(op & ~NOT);
73 immediate = ~immediate;
74 }
75
76 // Ignore the top 32 bits of an immediate if we're moving to a W register.
77 if (rd.Is32Bits()) {
78 // Check that the top 32 bits are consistent.
79 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
80 ((immediate >> kWRegSizeInBits) == -1));
81 immediate &= kWRegMask;
82 }
83
84 DCHECK(rd.Is64Bits() || is_uint32(immediate));
85
86 // Special cases for all set or all clear immediates.
87 if (immediate == 0) {
88 switch (op) {
89 case AND:
90 Mov(rd, 0);
91 return;
92 case ORR: // Fall through.
93 case EOR:
94 Mov(rd, rn);
95 return;
96 case ANDS: // Fall through.
97 case BICS:
98 break;
99 default:
100 UNREACHABLE();
101 }
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
104 switch (op) {
105 case AND:
106 Mov(rd, rn);
107 return;
108 case ORR:
109 Mov(rd, immediate);
110 return;
111 case EOR:
112 Mvn(rd, rn);
113 return;
114 case ANDS: // Fall through.
115 case BICS:
116 break;
117 default:
118 UNREACHABLE();
119 }
120 }
121
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
126 } else {
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = temps.AcquireSameSizeAs(rn);
129 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
130 if (rd.Is(csp)) {
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, imm_operand, op);
134 Mov(csp, temp);
135 AssertStackConsistency();
136 } else {
137 Logical(rd, rn, imm_operand, op);
138 }
139 }
140
141 } else if (operand.IsExtendedRegister()) {
142 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
143 // Add/sub extended supports shift <= 4. We want to support exactly the
144 // same modes here.
145 DCHECK(operand.shift_amount() <= 4);
146 DCHECK(operand.reg().Is64Bits() ||
147 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
148 Register temp = temps.AcquireSameSizeAs(rn);
149 EmitExtendShift(temp, operand.reg(), operand.extend(),
150 operand.shift_amount());
151 Logical(rd, rn, temp, op);
152
153 } else {
154 // The operand can be encoded in the instruction.
155 DCHECK(operand.IsShiftedRegister());
156 Logical(rd, rn, operand, op);
157 }
158}
159
160
161void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
162 DCHECK(allow_macro_instructions_);
163 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
164 DCHECK(!rd.IsZero());
165
166 // TODO(all) extend to support more immediates.
167 //
168 // Immediates on Aarch64 can be produced using an initial value, and zero to
169 // three move keep operations.
170 //
171 // Initial values can be generated with:
172 // 1. 64-bit move zero (movz).
173 // 2. 32-bit move inverted (movn).
174 // 3. 64-bit move inverted.
175 // 4. 32-bit orr immediate.
176 // 5. 64-bit orr immediate.
177 // Move-keep may then be used to modify each of the 16-bit half-words.
178 //
179 // The code below supports all five initial value generators, and
180 // applying move-keep operations to move-zero and move-inverted initial
181 // values.
182
183 // Try to move the immediate in one instruction, and if that fails, switch to
184 // using multiple instructions.
185 if (!TryOneInstrMoveImmediate(rd, imm)) {
186 unsigned reg_size = rd.SizeInBits();
187
188 // Generic immediate case. Imm will be represented by
189 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
190 // A move-zero or move-inverted is generated for the first non-zero or
191 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
192
193 uint64_t ignored_halfword = 0;
194 bool invert_move = false;
195 // If the number of 0xffff halfwords is greater than the number of 0x0000
196 // halfwords, it's more efficient to use move-inverted.
197 if (CountClearHalfWords(~imm, reg_size) >
198 CountClearHalfWords(imm, reg_size)) {
199 ignored_halfword = 0xffffL;
200 invert_move = true;
201 }
202
203 // Mov instructions can't move immediate values into the stack pointer, so
204 // set up a temporary register, if needed.
205 UseScratchRegisterScope temps(this);
206 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
207
208 // Iterate through the halfwords. Use movn/movz for the first non-ignored
209 // halfword, and movk for subsequent halfwords.
210 DCHECK((reg_size % 16) == 0);
211 bool first_mov_done = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000212 for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000213 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
214 if (imm16 != ignored_halfword) {
215 if (!first_mov_done) {
216 if (invert_move) {
217 movn(temp, (~imm16) & 0xffffL, 16 * i);
218 } else {
219 movz(temp, imm16, 16 * i);
220 }
221 first_mov_done = true;
222 } else {
223 // Construct a wider constant.
224 movk(temp, imm16, 16 * i);
225 }
226 }
227 }
228 DCHECK(first_mov_done);
229
230 // Move the temporary if the original destination register was the stack
231 // pointer.
232 if (rd.IsSP()) {
233 mov(rd, temp);
234 AssertStackConsistency();
235 }
236 }
237}
238
239
240void MacroAssembler::Mov(const Register& rd,
241 const Operand& operand,
242 DiscardMoveMode discard_mode) {
243 DCHECK(allow_macro_instructions_);
244 DCHECK(!rd.IsZero());
245
246 // Provide a swap register for instructions that need to write into the
247 // system stack pointer (and can't do this inherently).
248 UseScratchRegisterScope temps(this);
249 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
250
251 if (operand.NeedsRelocation(this)) {
252 Ldr(dst, operand.immediate());
253
254 } else if (operand.IsImmediate()) {
255 // Call the macro assembler for generic immediates.
256 Mov(dst, operand.ImmediateValue());
257
258 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
259 // Emit a shift instruction if moving a shifted register. This operation
260 // could also be achieved using an orr instruction (like orn used by Mvn),
261 // but using a shift instruction makes the disassembly clearer.
262 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
263
264 } else if (operand.IsExtendedRegister()) {
265 // Emit an extend instruction if moving an extended register. This handles
266 // extend with post-shift operations, too.
267 EmitExtendShift(dst, operand.reg(), operand.extend(),
268 operand.shift_amount());
269
270 } else {
271 // Otherwise, emit a register move only if the registers are distinct, or
272 // if they are not X registers.
273 //
274 // Note that mov(w0, w0) is not a no-op because it clears the top word of
275 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
276 // registers is not required to clear the top word of the X register. In
277 // this case, the instruction is discarded.
278 //
279 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
280 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
281 (discard_mode == kDontDiscardForSameWReg))) {
282 Assembler::mov(rd, operand.reg());
283 }
284 // This case can handle writes into the system stack pointer directly.
285 dst = rd;
286 }
287
288 // Copy the result to the system stack pointer.
289 if (!dst.Is(rd)) {
290 DCHECK(rd.IsSP());
291 Assembler::mov(rd, dst);
292 }
293}
294
295
296void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
297 DCHECK(allow_macro_instructions_);
298
299 if (operand.NeedsRelocation(this)) {
300 Ldr(rd, operand.immediate());
301 mvn(rd, rd);
302
303 } else if (operand.IsImmediate()) {
304 // Call the macro assembler for generic immediates.
305 Mov(rd, ~operand.ImmediateValue());
306
307 } else if (operand.IsExtendedRegister()) {
308 // Emit two instructions for the extend case. This differs from Mov, as
309 // the extend and invert can't be achieved in one instruction.
310 EmitExtendShift(rd, operand.reg(), operand.extend(),
311 operand.shift_amount());
312 mvn(rd, rd);
313
314 } else {
315 mvn(rd, operand);
316 }
317}
318
319
320unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
321 DCHECK((reg_size % 8) == 0);
322 int count = 0;
323 for (unsigned i = 0; i < (reg_size / 16); i++) {
324 if ((imm & 0xffff) == 0) {
325 count++;
326 }
327 imm >>= 16;
328 }
329 return count;
330}
331
332
333// The movz instruction can generate immediates containing an arbitrary 16-bit
334// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
335bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
336 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
337 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
338}
339
340
341// The movn instruction can generate immediates containing an arbitrary 16-bit
342// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
343bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
344 return IsImmMovz(~imm, reg_size);
345}
346
347
348void MacroAssembler::ConditionalCompareMacro(const Register& rn,
349 const Operand& operand,
350 StatusFlags nzcv,
351 Condition cond,
352 ConditionalCompareOp op) {
353 DCHECK((cond != al) && (cond != nv));
354 if (operand.NeedsRelocation(this)) {
355 UseScratchRegisterScope temps(this);
356 Register temp = temps.AcquireX();
357 Ldr(temp, operand.immediate());
358 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
359
360 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
361 (operand.IsImmediate() &&
362 IsImmConditionalCompare(operand.ImmediateValue()))) {
363 // The immediate can be encoded in the instruction, or the operand is an
364 // unshifted register: call the assembler.
365 ConditionalCompare(rn, operand, nzcv, cond, op);
366
367 } else {
368 // The operand isn't directly supported by the instruction: perform the
369 // operation on a temporary register.
370 UseScratchRegisterScope temps(this);
371 Register temp = temps.AcquireSameSizeAs(rn);
372 Mov(temp, operand);
373 ConditionalCompare(rn, temp, nzcv, cond, op);
374 }
375}
376
377
378void MacroAssembler::Csel(const Register& rd,
379 const Register& rn,
380 const Operand& operand,
381 Condition cond) {
382 DCHECK(allow_macro_instructions_);
383 DCHECK(!rd.IsZero());
384 DCHECK((cond != al) && (cond != nv));
385 if (operand.IsImmediate()) {
386 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
387 // register.
388 int64_t imm = operand.ImmediateValue();
389 Register zr = AppropriateZeroRegFor(rn);
390 if (imm == 0) {
391 csel(rd, rn, zr, cond);
392 } else if (imm == 1) {
393 csinc(rd, rn, zr, cond);
394 } else if (imm == -1) {
395 csinv(rd, rn, zr, cond);
396 } else {
397 UseScratchRegisterScope temps(this);
398 Register temp = temps.AcquireSameSizeAs(rn);
399 Mov(temp, imm);
400 csel(rd, rn, temp, cond);
401 }
402 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
403 // Unshifted register argument.
404 csel(rd, rn, operand.reg(), cond);
405 } else {
406 // All other arguments.
407 UseScratchRegisterScope temps(this);
408 Register temp = temps.AcquireSameSizeAs(rn);
409 Mov(temp, operand);
410 csel(rd, rn, temp, cond);
411 }
412}
413
414
415bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
416 int64_t imm) {
417 unsigned n, imm_s, imm_r;
418 int reg_size = dst.SizeInBits();
419 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
420 // Immediate can be represented in a move zero instruction. Movz can't write
421 // to the stack pointer.
422 movz(dst, imm);
423 return true;
424 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
425 // Immediate can be represented in a move not instruction. Movn can't write
426 // to the stack pointer.
427 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
428 return true;
429 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
430 // Immediate can be represented in a logical orr instruction.
431 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
432 return true;
433 }
434 return false;
435}
436
437
438Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
439 int64_t imm) {
440 int reg_size = dst.SizeInBits();
441
442 // Encode the immediate in a single move instruction, if possible.
443 if (TryOneInstrMoveImmediate(dst, imm)) {
444 // The move was successful; nothing to do here.
445 } else {
446 // Pre-shift the immediate to the least-significant bits of the register.
447 int shift_low = CountTrailingZeros(imm, reg_size);
448 int64_t imm_low = imm >> shift_low;
449
450 // Pre-shift the immediate to the most-significant bits of the register. We
451 // insert set bits in the least-significant bits, as this creates a
452 // different immediate that may be encodable using movn or orr-immediate.
453 // If this new immediate is encodable, the set bits will be eliminated by
454 // the post shift on the following instruction.
455 int shift_high = CountLeadingZeros(imm, reg_size);
456 int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
457
458 if (TryOneInstrMoveImmediate(dst, imm_low)) {
459 // The new immediate has been moved into the destination's low bits:
460 // return a new leftward-shifting operand.
461 return Operand(dst, LSL, shift_low);
462 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
463 // The new immediate has been moved into the destination's high bits:
464 // return a new rightward-shifting operand.
465 return Operand(dst, LSR, shift_high);
466 } else {
467 // Use the generic move operation to set up the immediate.
468 Mov(dst, imm);
469 }
470 }
471 return Operand(dst);
472}
473
474
475void MacroAssembler::AddSubMacro(const Register& rd,
476 const Register& rn,
477 const Operand& operand,
478 FlagsUpdate S,
479 AddSubOp op) {
480 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
481 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
482 // The instruction would be a nop. Avoid generating useless code.
483 return;
484 }
485
486 if (operand.NeedsRelocation(this)) {
487 UseScratchRegisterScope temps(this);
488 Register temp = temps.AcquireX();
489 Ldr(temp, operand.immediate());
490 AddSubMacro(rd, rn, temp, S, op);
491 } else if ((operand.IsImmediate() &&
492 !IsImmAddSub(operand.ImmediateValue())) ||
493 (rn.IsZero() && !operand.IsShiftedRegister()) ||
494 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
495 UseScratchRegisterScope temps(this);
496 Register temp = temps.AcquireSameSizeAs(rn);
497 if (operand.IsImmediate()) {
498 Operand imm_operand =
499 MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
500 AddSub(rd, rn, imm_operand, S, op);
501 } else {
502 Mov(temp, operand);
503 AddSub(rd, rn, temp, S, op);
504 }
505 } else {
506 AddSub(rd, rn, operand, S, op);
507 }
508}
509
510
511void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
512 const Register& rn,
513 const Operand& operand,
514 FlagsUpdate S,
515 AddSubWithCarryOp op) {
516 DCHECK(rd.SizeInBits() == rn.SizeInBits());
517 UseScratchRegisterScope temps(this);
518
519 if (operand.NeedsRelocation(this)) {
520 Register temp = temps.AcquireX();
521 Ldr(temp, operand.immediate());
522 AddSubWithCarryMacro(rd, rn, temp, S, op);
523
524 } else if (operand.IsImmediate() ||
525 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
526 // Add/sub with carry (immediate or ROR shifted register.)
527 Register temp = temps.AcquireSameSizeAs(rn);
528 Mov(temp, operand);
529 AddSubWithCarry(rd, rn, temp, S, op);
530
531 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
532 // Add/sub with carry (shifted register).
533 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
534 DCHECK(operand.shift() != ROR);
535 DCHECK(is_uintn(operand.shift_amount(),
536 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
537 : kWRegSizeInBitsLog2));
538 Register temp = temps.AcquireSameSizeAs(rn);
539 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
540 AddSubWithCarry(rd, rn, temp, S, op);
541
542 } else if (operand.IsExtendedRegister()) {
543 // Add/sub with carry (extended register).
544 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
545 // Add/sub extended supports a shift <= 4. We want to support exactly the
546 // same modes.
547 DCHECK(operand.shift_amount() <= 4);
548 DCHECK(operand.reg().Is64Bits() ||
549 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
550 Register temp = temps.AcquireSameSizeAs(rn);
551 EmitExtendShift(temp, operand.reg(), operand.extend(),
552 operand.shift_amount());
553 AddSubWithCarry(rd, rn, temp, S, op);
554
555 } else {
556 // The addressing mode is directly supported by the instruction.
557 AddSubWithCarry(rd, rn, operand, S, op);
558 }
559}
560
561
562void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
563 const MemOperand& addr,
564 LoadStoreOp op) {
565 int64_t offset = addr.offset();
566 LSDataSize size = CalcLSDataSize(op);
567
568 // Check if an immediate offset fits in the immediate field of the
569 // appropriate instruction. If not, emit two instructions to perform
570 // the operation.
571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
572 !IsImmLSUnscaled(offset)) {
573 // Immediate offset that can't be encoded using unsigned or unscaled
574 // addressing modes.
575 UseScratchRegisterScope temps(this);
576 Register temp = temps.AcquireSameSizeAs(addr.base());
577 Mov(temp, addr.offset());
578 LoadStore(rt, MemOperand(addr.base(), temp), op);
579 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
580 // Post-index beyond unscaled addressing range.
581 LoadStore(rt, MemOperand(addr.base()), op);
582 add(addr.base(), addr.base(), offset);
583 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
584 // Pre-index beyond unscaled addressing range.
585 add(addr.base(), addr.base(), offset);
586 LoadStore(rt, MemOperand(addr.base()), op);
587 } else {
588 // Encodable in one load/store instruction.
589 LoadStore(rt, addr, op);
590 }
591}
592
593void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
594 const CPURegister& rt2,
595 const MemOperand& addr,
596 LoadStorePairOp op) {
597 // TODO(all): Should we support register offset for load-store-pair?
598 DCHECK(!addr.IsRegisterOffset());
599
600 int64_t offset = addr.offset();
601 LSDataSize size = CalcLSPairDataSize(op);
602
603 // Check if the offset fits in the immediate field of the appropriate
604 // instruction. If not, emit two instructions to perform the operation.
605 if (IsImmLSPair(offset, size)) {
606 // Encodable in one load/store pair instruction.
607 LoadStorePair(rt, rt2, addr, op);
608 } else {
609 Register base = addr.base();
610 if (addr.IsImmediateOffset()) {
611 UseScratchRegisterScope temps(this);
612 Register temp = temps.AcquireSameSizeAs(base);
613 Add(temp, base, offset);
614 LoadStorePair(rt, rt2, MemOperand(temp), op);
615 } else if (addr.IsPostIndex()) {
616 LoadStorePair(rt, rt2, MemOperand(base), op);
617 Add(base, base, offset);
618 } else {
619 DCHECK(addr.IsPreIndex());
620 Add(base, base, offset);
621 LoadStorePair(rt, rt2, MemOperand(base), op);
622 }
623 }
624}
625
626
627void MacroAssembler::Load(const Register& rt,
628 const MemOperand& addr,
629 Representation r) {
630 DCHECK(!r.IsDouble());
631
632 if (r.IsInteger8()) {
633 Ldrsb(rt, addr);
634 } else if (r.IsUInteger8()) {
635 Ldrb(rt, addr);
636 } else if (r.IsInteger16()) {
637 Ldrsh(rt, addr);
638 } else if (r.IsUInteger16()) {
639 Ldrh(rt, addr);
640 } else if (r.IsInteger32()) {
641 Ldr(rt.W(), addr);
642 } else {
643 DCHECK(rt.Is64Bits());
644 Ldr(rt, addr);
645 }
646}
647
648
649void MacroAssembler::Store(const Register& rt,
650 const MemOperand& addr,
651 Representation r) {
652 DCHECK(!r.IsDouble());
653
654 if (r.IsInteger8() || r.IsUInteger8()) {
655 Strb(rt, addr);
656 } else if (r.IsInteger16() || r.IsUInteger16()) {
657 Strh(rt, addr);
658 } else if (r.IsInteger32()) {
659 Str(rt.W(), addr);
660 } else {
661 DCHECK(rt.Is64Bits());
662 if (r.IsHeapObject()) {
663 AssertNotSmi(rt);
664 } else if (r.IsSmi()) {
665 AssertSmi(rt);
666 }
667 Str(rt, addr);
668 }
669}
670
671
672bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
673 Label *label, ImmBranchType b_type) {
674 bool need_longer_range = false;
675 // There are two situations in which we care about the offset being out of
676 // range:
677 // - The label is bound but too far away.
678 // - The label is not bound but linked, and the previous branch
679 // instruction in the chain is too far away.
680 if (label->is_bound() || label->is_linked()) {
681 need_longer_range =
682 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
683 }
684 if (!need_longer_range && !label->is_bound()) {
685 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
686 unresolved_branches_.insert(
687 std::pair<int, FarBranchInfo>(max_reachable_pc,
688 FarBranchInfo(pc_offset(), label)));
689 // Also maintain the next pool check.
690 next_veneer_pool_check_ =
691 Min(next_veneer_pool_check_,
692 max_reachable_pc - kVeneerDistanceCheckMargin);
693 }
694 return need_longer_range;
695}
696
697
698void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
699 DCHECK(allow_macro_instructions_);
700 DCHECK(!rd.IsZero());
701
702 if (hint == kAdrNear) {
703 adr(rd, label);
704 return;
705 }
706
707 DCHECK(hint == kAdrFar);
708 if (label->is_bound()) {
709 int label_offset = label->pos() - pc_offset();
710 if (Instruction::IsValidPCRelOffset(label_offset)) {
711 adr(rd, label);
712 } else {
713 DCHECK(label_offset <= 0);
714 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
715 adr(rd, min_adr_offset);
716 Add(rd, rd, label_offset - min_adr_offset);
717 }
718 } else {
719 UseScratchRegisterScope temps(this);
720 Register scratch = temps.AcquireX();
721
722 InstructionAccurateScope scope(
723 this, PatchingAssembler::kAdrFarPatchableNInstrs);
724 adr(rd, label);
725 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
726 nop(ADR_FAR_NOP);
727 }
728 movz(scratch, 0);
729 }
730}
731
732
733void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
734 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
735 (bit == -1 || type >= kBranchTypeFirstUsingBit));
736 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
737 B(static_cast<Condition>(type), label);
738 } else {
739 switch (type) {
740 case always: B(label); break;
741 case never: break;
742 case reg_zero: Cbz(reg, label); break;
743 case reg_not_zero: Cbnz(reg, label); break;
744 case reg_bit_clear: Tbz(reg, bit, label); break;
745 case reg_bit_set: Tbnz(reg, bit, label); break;
746 default:
747 UNREACHABLE();
748 }
749 }
750}
751
752
753void MacroAssembler::B(Label* label, Condition cond) {
754 DCHECK(allow_macro_instructions_);
755 DCHECK((cond != al) && (cond != nv));
756
757 Label done;
758 bool need_extra_instructions =
759 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
760
761 if (need_extra_instructions) {
762 b(&done, NegateCondition(cond));
763 B(label);
764 } else {
765 b(label, cond);
766 }
767 bind(&done);
768}
769
770
771void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
772 DCHECK(allow_macro_instructions_);
773
774 Label done;
775 bool need_extra_instructions =
776 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
777
778 if (need_extra_instructions) {
779 tbz(rt, bit_pos, &done);
780 B(label);
781 } else {
782 tbnz(rt, bit_pos, label);
783 }
784 bind(&done);
785}
786
787
788void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
789 DCHECK(allow_macro_instructions_);
790
791 Label done;
792 bool need_extra_instructions =
793 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
794
795 if (need_extra_instructions) {
796 tbnz(rt, bit_pos, &done);
797 B(label);
798 } else {
799 tbz(rt, bit_pos, label);
800 }
801 bind(&done);
802}
803
804
805void MacroAssembler::Cbnz(const Register& rt, Label* label) {
806 DCHECK(allow_macro_instructions_);
807
808 Label done;
809 bool need_extra_instructions =
810 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
811
812 if (need_extra_instructions) {
813 cbz(rt, &done);
814 B(label);
815 } else {
816 cbnz(rt, label);
817 }
818 bind(&done);
819}
820
821
822void MacroAssembler::Cbz(const Register& rt, Label* label) {
823 DCHECK(allow_macro_instructions_);
824
825 Label done;
826 bool need_extra_instructions =
827 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
828
829 if (need_extra_instructions) {
830 cbnz(rt, &done);
831 B(label);
832 } else {
833 cbz(rt, label);
834 }
835 bind(&done);
836}
837
838
839// Pseudo-instructions.
840
841
842void MacroAssembler::Abs(const Register& rd, const Register& rm,
843 Label* is_not_representable,
844 Label* is_representable) {
845 DCHECK(allow_macro_instructions_);
846 DCHECK(AreSameSizeAndType(rd, rm));
847
848 Cmp(rm, 1);
849 Cneg(rd, rm, lt);
850
851 // If the comparison sets the v flag, the input was the smallest value
852 // representable by rm, and the mathematical result of abs(rm) is not
853 // representable using two's complement.
854 if ((is_not_representable != NULL) && (is_representable != NULL)) {
855 B(is_not_representable, vs);
856 B(is_representable);
857 } else if (is_not_representable != NULL) {
858 B(is_not_representable, vs);
859 } else if (is_representable != NULL) {
860 B(is_representable, vc);
861 }
862}
863
864
865// Abstracted stack operations.
866
867
868void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
869 const CPURegister& src2, const CPURegister& src3) {
870 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
871
872 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
873 int size = src0.SizeInBytes();
874
875 PushPreamble(count, size);
876 PushHelper(count, size, src0, src1, src2, src3);
877}
878
879
880void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
881 const CPURegister& src2, const CPURegister& src3,
882 const CPURegister& src4, const CPURegister& src5,
883 const CPURegister& src6, const CPURegister& src7) {
884 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
885
886 int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
887 int size = src0.SizeInBytes();
888
889 PushPreamble(count, size);
890 PushHelper(4, size, src0, src1, src2, src3);
891 PushHelper(count - 4, size, src4, src5, src6, src7);
892}
893
894
895void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
896 const CPURegister& dst2, const CPURegister& dst3) {
897 // It is not valid to pop into the same register more than once in one
898 // instruction, not even into the zero register.
899 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
900 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
901 DCHECK(dst0.IsValid());
902
903 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
904 int size = dst0.SizeInBytes();
905
906 PopHelper(count, size, dst0, dst1, dst2, dst3);
907 PopPostamble(count, size);
908}
909
910
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000911void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
912 const CPURegister& dst2, const CPURegister& dst3,
913 const CPURegister& dst4, const CPURegister& dst5,
914 const CPURegister& dst6, const CPURegister& dst7) {
915 // It is not valid to pop into the same register more than once in one
916 // instruction, not even into the zero register.
917 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
918 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
919 DCHECK(dst0.IsValid());
920
921 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
922 int size = dst0.SizeInBytes();
923
924 PopHelper(4, size, dst0, dst1, dst2, dst3);
925 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
926 PopPostamble(count, size);
927}
928
929
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000930void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
931 int size = src0.SizeInBytes() + src1.SizeInBytes();
932
933 PushPreamble(size);
934 // Reserve room for src0 and push src1.
935 str(src1, MemOperand(StackPointer(), -size, PreIndex));
936 // Fill the gap with src0.
937 str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
938}
939
940
941void MacroAssembler::PushPopQueue::PushQueued(
942 PreambleDirective preamble_directive) {
943 if (queued_.empty()) return;
944
945 if (preamble_directive == WITH_PREAMBLE) {
946 masm_->PushPreamble(size_);
947 }
948
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000949 size_t count = queued_.size();
950 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000951 while (index < count) {
952 // PushHelper can only handle registers with the same size and type, and it
953 // can handle only four at a time. Batch them up accordingly.
954 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
955 int batch_index = 0;
956 do {
957 batch[batch_index++] = queued_[index++];
958 } while ((batch_index < 4) && (index < count) &&
959 batch[0].IsSameSizeAndType(queued_[index]));
960
961 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
962 batch[0], batch[1], batch[2], batch[3]);
963 }
964
965 queued_.clear();
966}
967
968
969void MacroAssembler::PushPopQueue::PopQueued() {
970 if (queued_.empty()) return;
971
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000972 size_t count = queued_.size();
973 size_t index = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000974 while (index < count) {
975 // PopHelper can only handle registers with the same size and type, and it
976 // can handle only four at a time. Batch them up accordingly.
977 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
978 int batch_index = 0;
979 do {
980 batch[batch_index++] = queued_[index++];
981 } while ((batch_index < 4) && (index < count) &&
982 batch[0].IsSameSizeAndType(queued_[index]));
983
984 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
985 batch[0], batch[1], batch[2], batch[3]);
986 }
987
988 masm_->PopPostamble(size_);
989 queued_.clear();
990}
991
992
993void MacroAssembler::PushCPURegList(CPURegList registers) {
994 int size = registers.RegisterSizeInBytes();
995
996 PushPreamble(registers.Count(), size);
997 // Push up to four registers at a time because if the current stack pointer is
998 // csp and reg_size is 32, registers must be pushed in blocks of four in order
999 // to maintain the 16-byte alignment for csp.
1000 while (!registers.IsEmpty()) {
1001 int count_before = registers.Count();
1002 const CPURegister& src0 = registers.PopHighestIndex();
1003 const CPURegister& src1 = registers.PopHighestIndex();
1004 const CPURegister& src2 = registers.PopHighestIndex();
1005 const CPURegister& src3 = registers.PopHighestIndex();
1006 int count = count_before - registers.Count();
1007 PushHelper(count, size, src0, src1, src2, src3);
1008 }
1009}
1010
1011
1012void MacroAssembler::PopCPURegList(CPURegList registers) {
1013 int size = registers.RegisterSizeInBytes();
1014
1015 // Pop up to four registers at a time because if the current stack pointer is
1016 // csp and reg_size is 32, registers must be pushed in blocks of four in
1017 // order to maintain the 16-byte alignment for csp.
1018 while (!registers.IsEmpty()) {
1019 int count_before = registers.Count();
1020 const CPURegister& dst0 = registers.PopLowestIndex();
1021 const CPURegister& dst1 = registers.PopLowestIndex();
1022 const CPURegister& dst2 = registers.PopLowestIndex();
1023 const CPURegister& dst3 = registers.PopLowestIndex();
1024 int count = count_before - registers.Count();
1025 PopHelper(count, size, dst0, dst1, dst2, dst3);
1026 }
1027 PopPostamble(registers.Count(), size);
1028}
1029
1030
1031void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
1032 int size = src.SizeInBytes();
1033
1034 PushPreamble(count, size);
1035
1036 if (FLAG_optimize_for_size && count > 8) {
1037 UseScratchRegisterScope temps(this);
1038 Register temp = temps.AcquireX();
1039
1040 Label loop;
1041 __ Mov(temp, count / 2);
1042 __ Bind(&loop);
1043 PushHelper(2, size, src, src, NoReg, NoReg);
1044 __ Subs(temp, temp, 1);
1045 __ B(ne, &loop);
1046
1047 count %= 2;
1048 }
1049
1050 // Push up to four registers at a time if possible because if the current
1051 // stack pointer is csp and the register size is 32, registers must be pushed
1052 // in blocks of four in order to maintain the 16-byte alignment for csp.
1053 while (count >= 4) {
1054 PushHelper(4, size, src, src, src, src);
1055 count -= 4;
1056 }
1057 if (count >= 2) {
1058 PushHelper(2, size, src, src, NoReg, NoReg);
1059 count -= 2;
1060 }
1061 if (count == 1) {
1062 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1063 count -= 1;
1064 }
1065 DCHECK(count == 0);
1066}
1067
1068
1069void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1070 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1071
1072 UseScratchRegisterScope temps(this);
1073 Register temp = temps.AcquireSameSizeAs(count);
1074
1075 if (FLAG_optimize_for_size) {
1076 Label loop, done;
1077
1078 Subs(temp, count, 1);
1079 B(mi, &done);
1080
1081 // Push all registers individually, to save code size.
1082 Bind(&loop);
1083 Subs(temp, temp, 1);
1084 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1085 B(pl, &loop);
1086
1087 Bind(&done);
1088 } else {
1089 Label loop, leftover2, leftover1, done;
1090
1091 Subs(temp, count, 4);
1092 B(mi, &leftover2);
1093
1094 // Push groups of four first.
1095 Bind(&loop);
1096 Subs(temp, temp, 4);
1097 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1098 B(pl, &loop);
1099
1100 // Push groups of two.
1101 Bind(&leftover2);
1102 Tbz(count, 1, &leftover1);
1103 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1104
1105 // Push the last one (if required).
1106 Bind(&leftover1);
1107 Tbz(count, 0, &done);
1108 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1109
1110 Bind(&done);
1111 }
1112}
1113
1114
1115void MacroAssembler::PushHelper(int count, int size,
1116 const CPURegister& src0,
1117 const CPURegister& src1,
1118 const CPURegister& src2,
1119 const CPURegister& src3) {
1120 // Ensure that we don't unintentially modify scratch or debug registers.
1121 InstructionAccurateScope scope(this);
1122
1123 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1124 DCHECK(size == src0.SizeInBytes());
1125
1126 // When pushing multiple registers, the store order is chosen such that
1127 // Push(a, b) is equivalent to Push(a) followed by Push(b).
1128 switch (count) {
1129 case 1:
1130 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1131 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1132 break;
1133 case 2:
1134 DCHECK(src2.IsNone() && src3.IsNone());
1135 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1136 break;
1137 case 3:
1138 DCHECK(src3.IsNone());
1139 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1140 str(src0, MemOperand(StackPointer(), 2 * size));
1141 break;
1142 case 4:
1143 // Skip over 4 * size, then fill in the gap. This allows four W registers
1144 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
1145 // at all times.
1146 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1147 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1148 break;
1149 default:
1150 UNREACHABLE();
1151 }
1152}
1153
1154
1155void MacroAssembler::PopHelper(int count, int size,
1156 const CPURegister& dst0,
1157 const CPURegister& dst1,
1158 const CPURegister& dst2,
1159 const CPURegister& dst3) {
1160 // Ensure that we don't unintentially modify scratch or debug registers.
1161 InstructionAccurateScope scope(this);
1162
1163 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1164 DCHECK(size == dst0.SizeInBytes());
1165
1166 // When popping multiple registers, the load order is chosen such that
1167 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1168 switch (count) {
1169 case 1:
1170 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1171 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1172 break;
1173 case 2:
1174 DCHECK(dst2.IsNone() && dst3.IsNone());
1175 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1176 break;
1177 case 3:
1178 DCHECK(dst3.IsNone());
1179 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1180 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1181 break;
1182 case 4:
1183 // Load the higher addresses first, then load the lower addresses and
1184 // skip the whole block in the second instruction. This allows four W
1185 // registers to be popped using csp, whilst maintaining 16-byte alignment
1186 // for csp at all times.
1187 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1188 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1189 break;
1190 default:
1191 UNREACHABLE();
1192 }
1193}
1194
1195
1196void MacroAssembler::PushPreamble(Operand total_size) {
1197 if (csp.Is(StackPointer())) {
1198 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1199 // on entry and the total size of the specified registers must also be a
1200 // multiple of 16 bytes.
1201 if (total_size.IsImmediate()) {
1202 DCHECK((total_size.ImmediateValue() % 16) == 0);
1203 }
1204
1205 // Don't check access size for non-immediate sizes. It's difficult to do
1206 // well, and it will be caught by hardware (or the simulator) anyway.
1207 } else {
1208 // Even if the current stack pointer is not the system stack pointer (csp),
1209 // the system stack pointer will still be modified in order to comply with
1210 // ABI rules about accessing memory below the system stack pointer.
1211 BumpSystemStackPointer(total_size);
1212 }
1213}
1214
1215
1216void MacroAssembler::PopPostamble(Operand total_size) {
1217 if (csp.Is(StackPointer())) {
1218 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1219 // on entry and the total size of the specified registers must also be a
1220 // multiple of 16 bytes.
1221 if (total_size.IsImmediate()) {
1222 DCHECK((total_size.ImmediateValue() % 16) == 0);
1223 }
1224
1225 // Don't check access size for non-immediate sizes. It's difficult to do
1226 // well, and it will be caught by hardware (or the simulator) anyway.
1227 } else if (emit_debug_code()) {
1228 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1229 // but if we keep it matching StackPointer, the simulator can detect memory
1230 // accesses in the now-free part of the stack.
1231 SyncSystemStackPointer();
1232 }
1233}
1234
1235
1236void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1237 if (offset.IsImmediate()) {
1238 DCHECK(offset.ImmediateValue() >= 0);
1239 } else if (emit_debug_code()) {
1240 Cmp(xzr, offset);
1241 Check(le, kStackAccessBelowStackPointer);
1242 }
1243
1244 Str(src, MemOperand(StackPointer(), offset));
1245}
1246
1247
1248void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1249 if (offset.IsImmediate()) {
1250 DCHECK(offset.ImmediateValue() >= 0);
1251 } else if (emit_debug_code()) {
1252 Cmp(xzr, offset);
1253 Check(le, kStackAccessBelowStackPointer);
1254 }
1255
1256 Ldr(dst, MemOperand(StackPointer(), offset));
1257}
1258
1259
1260void MacroAssembler::PokePair(const CPURegister& src1,
1261 const CPURegister& src2,
1262 int offset) {
1263 DCHECK(AreSameSizeAndType(src1, src2));
1264 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1265 Stp(src1, src2, MemOperand(StackPointer(), offset));
1266}
1267
1268
1269void MacroAssembler::PeekPair(const CPURegister& dst1,
1270 const CPURegister& dst2,
1271 int offset) {
1272 DCHECK(AreSameSizeAndType(dst1, dst2));
1273 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1274 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1275}
1276
1277
1278void MacroAssembler::PushCalleeSavedRegisters() {
1279 // Ensure that the macro-assembler doesn't use any scratch registers.
1280 InstructionAccurateScope scope(this);
1281
1282 // This method must not be called unless the current stack pointer is the
1283 // system stack pointer (csp).
1284 DCHECK(csp.Is(StackPointer()));
1285
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001286 MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001287
1288 stp(d14, d15, tos);
1289 stp(d12, d13, tos);
1290 stp(d10, d11, tos);
1291 stp(d8, d9, tos);
1292
1293 stp(x29, x30, tos);
1294 stp(x27, x28, tos); // x28 = jssp
1295 stp(x25, x26, tos);
1296 stp(x23, x24, tos);
1297 stp(x21, x22, tos);
1298 stp(x19, x20, tos);
1299}
1300
1301
1302void MacroAssembler::PopCalleeSavedRegisters() {
1303 // Ensure that the macro-assembler doesn't use any scratch registers.
1304 InstructionAccurateScope scope(this);
1305
1306 // This method must not be called unless the current stack pointer is the
1307 // system stack pointer (csp).
1308 DCHECK(csp.Is(StackPointer()));
1309
1310 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1311
1312 ldp(x19, x20, tos);
1313 ldp(x21, x22, tos);
1314 ldp(x23, x24, tos);
1315 ldp(x25, x26, tos);
1316 ldp(x27, x28, tos); // x28 = jssp
1317 ldp(x29, x30, tos);
1318
1319 ldp(d8, d9, tos);
1320 ldp(d10, d11, tos);
1321 ldp(d12, d13, tos);
1322 ldp(d14, d15, tos);
1323}
1324
1325
1326void MacroAssembler::AssertStackConsistency() {
1327 // Avoid emitting code when !use_real_abort() since non-real aborts cause too
1328 // much code to be generated.
1329 if (emit_debug_code() && use_real_aborts()) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001330 if (csp.Is(StackPointer())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001331 // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
1332 // can't check the alignment of csp without using a scratch register (or
1333 // clobbering the flags), but the processor (or simulator) will abort if
1334 // it is not properly aligned during a load.
1335 ldr(xzr, MemOperand(csp, 0));
1336 }
1337 if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
1338 Label ok;
1339 // Check that csp <= StackPointer(), preserving all registers and NZCV.
1340 sub(StackPointer(), csp, StackPointer());
1341 cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1342 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1343
1344 // Avoid generating AssertStackConsistency checks for the Push in Abort.
1345 { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001346 // Restore StackPointer().
1347 sub(StackPointer(), csp, StackPointer());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001348 Abort(kTheCurrentStackPointerIsBelowCsp);
1349 }
1350
1351 bind(&ok);
1352 // Restore StackPointer().
1353 sub(StackPointer(), csp, StackPointer());
1354 }
1355 }
1356}
1357
1358
1359void MacroAssembler::AssertFPCRState(Register fpcr) {
1360 if (emit_debug_code()) {
1361 Label unexpected_mode, done;
1362 UseScratchRegisterScope temps(this);
1363 if (fpcr.IsNone()) {
1364 fpcr = temps.AcquireX();
1365 Mrs(fpcr, FPCR);
1366 }
1367
1368 // Settings overridden by ConfiugreFPCR():
1369 // - Assert that default-NaN mode is set.
1370 Tbz(fpcr, DN_offset, &unexpected_mode);
1371
1372 // Settings left to their default values:
1373 // - Assert that flush-to-zero is not set.
1374 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1375 // - Assert that the rounding mode is nearest-with-ties-to-even.
1376 STATIC_ASSERT(FPTieEven == 0);
1377 Tst(fpcr, RMode_mask);
1378 B(eq, &done);
1379
1380 Bind(&unexpected_mode);
1381 Abort(kUnexpectedFPCRMode);
1382
1383 Bind(&done);
1384 }
1385}
1386
1387
1388void MacroAssembler::ConfigureFPCR() {
1389 UseScratchRegisterScope temps(this);
1390 Register fpcr = temps.AcquireX();
1391 Mrs(fpcr, FPCR);
1392
1393 // If necessary, enable default-NaN mode. The default values of the other FPCR
1394 // options should be suitable, and AssertFPCRState will verify that.
1395 Label no_write_required;
1396 Tbnz(fpcr, DN_offset, &no_write_required);
1397
1398 Orr(fpcr, fpcr, DN_mask);
1399 Msr(FPCR, fpcr);
1400
1401 Bind(&no_write_required);
1402 AssertFPCRState(fpcr);
1403}
1404
1405
1406void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
1407 const FPRegister& src) {
1408 AssertFPCRState();
1409
1410 // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
1411 // for NaNs, which become the default NaN. We use fsub rather than fadd
1412 // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1413 Fsub(dst, src, fp_zero);
1414}
1415
1416
1417void MacroAssembler::LoadRoot(CPURegister destination,
1418 Heap::RootListIndex index) {
1419 // TODO(jbramley): Most root values are constants, and can be synthesized
1420 // without a load. Refer to the ARM back end for details.
1421 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1422}
1423
1424
1425void MacroAssembler::StoreRoot(Register source,
1426 Heap::RootListIndex index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001427 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001428 Str(source, MemOperand(root, index << kPointerSizeLog2));
1429}
1430
1431
1432void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1433 Register false_root) {
1434 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1435 Ldp(true_root, false_root,
1436 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1437}
1438
1439
1440void MacroAssembler::LoadHeapObject(Register result,
1441 Handle<HeapObject> object) {
1442 AllowDeferredHandleDereference using_raw_address;
1443 if (isolate()->heap()->InNewSpace(*object)) {
1444 Handle<Cell> cell = isolate()->factory()->NewCell(object);
1445 Mov(result, Operand(cell));
1446 Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
1447 } else {
1448 Mov(result, Operand(object));
1449 }
1450}
1451
1452
1453void MacroAssembler::LoadInstanceDescriptors(Register map,
1454 Register descriptors) {
1455 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1456}
1457
1458
1459void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1460 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1461 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1462}
1463
1464
1465void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1466 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1467 Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
1468 And(dst, dst, Map::EnumLengthBits::kMask);
1469}
1470
1471
1472void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1473 EnumLengthUntagged(dst, map);
1474 SmiTag(dst, dst);
1475}
1476
1477
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001478void MacroAssembler::LoadAccessor(Register dst, Register holder,
1479 int accessor_index,
1480 AccessorComponent accessor) {
1481 Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
1482 LoadInstanceDescriptors(dst, dst);
1483 Ldr(dst,
1484 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
1485 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
1486 : AccessorPair::kSetterOffset;
1487 Ldr(dst, FieldMemOperand(dst, offset));
1488}
1489
1490
Ben Murdoch097c5b22016-05-18 11:27:45 +01001491void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
1492 Register scratch1, Register scratch2,
1493 Register scratch3, Register scratch4,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001494 Label* call_runtime) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001495 DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001496
1497 Register empty_fixed_array_value = scratch0;
1498 Register current_object = scratch1;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001499 Register null_value = scratch4;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001500
1501 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1502 Label next, start;
1503
1504 Mov(current_object, object);
1505
1506 // Check if the enum length field is properly initialized, indicating that
1507 // there is an enum cache.
1508 Register map = scratch2;
1509 Register enum_length = scratch3;
1510 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1511
1512 EnumLengthUntagged(enum_length, map);
1513 Cmp(enum_length, kInvalidEnumCacheSentinel);
1514 B(eq, call_runtime);
1515
Ben Murdoch097c5b22016-05-18 11:27:45 +01001516 LoadRoot(null_value, Heap::kNullValueRootIndex);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001517 B(&start);
1518
1519 Bind(&next);
1520 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1521
1522 // For all objects but the receiver, check that the cache is empty.
1523 EnumLengthUntagged(enum_length, map);
1524 Cbnz(enum_length, call_runtime);
1525
1526 Bind(&start);
1527
1528 // Check that there are no elements. Register current_object contains the
1529 // current JS object we've reached through the prototype chain.
1530 Label no_elements;
1531 Ldr(current_object, FieldMemOperand(current_object,
1532 JSObject::kElementsOffset));
1533 Cmp(current_object, empty_fixed_array_value);
1534 B(eq, &no_elements);
1535
1536 // Second chance, the object may be using the empty slow element dictionary.
1537 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1538 B(ne, call_runtime);
1539
1540 Bind(&no_elements);
1541 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1542 Cmp(current_object, null_value);
1543 B(ne, &next);
1544}
1545
1546
1547void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1548 Register scratch1,
1549 Register scratch2,
1550 Label* no_memento_found) {
1551 ExternalReference new_space_start =
1552 ExternalReference::new_space_start(isolate());
1553 ExternalReference new_space_allocation_top =
1554 ExternalReference::new_space_allocation_top_address(isolate());
1555
1556 Add(scratch1, receiver,
1557 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
1558 Cmp(scratch1, new_space_start);
1559 B(lt, no_memento_found);
1560
1561 Mov(scratch2, new_space_allocation_top);
1562 Ldr(scratch2, MemOperand(scratch2));
1563 Cmp(scratch1, scratch2);
1564 B(gt, no_memento_found);
1565
1566 Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
1567 Cmp(scratch1,
1568 Operand(isolate()->factory()->allocation_memento_map()));
1569}
1570
1571
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001572void MacroAssembler::InNewSpace(Register object,
1573 Condition cond,
1574 Label* branch) {
1575 DCHECK(cond == eq || cond == ne);
1576 UseScratchRegisterScope temps(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001577 const int mask =
1578 (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
1579 CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001580}
1581
1582
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001583void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1584 if (emit_debug_code()) {
1585 STATIC_ASSERT(kSmiTag == 0);
1586 Tst(object, kSmiTagMask);
1587 Check(eq, reason);
1588 }
1589}
1590
1591
1592void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1593 if (emit_debug_code()) {
1594 STATIC_ASSERT(kSmiTag == 0);
1595 Tst(object, kSmiTagMask);
1596 Check(ne, reason);
1597 }
1598}
1599
1600
1601void MacroAssembler::AssertName(Register object) {
1602 if (emit_debug_code()) {
1603 AssertNotSmi(object, kOperandIsASmiAndNotAName);
1604
1605 UseScratchRegisterScope temps(this);
1606 Register temp = temps.AcquireX();
1607
1608 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1609 CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1610 Check(ls, kOperandIsNotAName);
1611 }
1612}
1613
1614
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001615void MacroAssembler::AssertFunction(Register object) {
1616 if (emit_debug_code()) {
1617 AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
1618
1619 UseScratchRegisterScope temps(this);
1620 Register temp = temps.AcquireX();
1621
1622 CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
1623 Check(eq, kOperandIsNotAFunction);
1624 }
1625}
1626
1627
1628void MacroAssembler::AssertBoundFunction(Register object) {
1629 if (emit_debug_code()) {
1630 AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
1631
1632 UseScratchRegisterScope temps(this);
1633 Register temp = temps.AcquireX();
1634
1635 CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1636 Check(eq, kOperandIsNotABoundFunction);
1637 }
1638}
1639
1640
Ben Murdoch097c5b22016-05-18 11:27:45 +01001641void MacroAssembler::AssertReceiver(Register object) {
1642 if (emit_debug_code()) {
1643 AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
1644
1645 UseScratchRegisterScope temps(this);
1646 Register temp = temps.AcquireX();
1647
1648 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1649 CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
1650 Check(hs, kOperandIsNotAReceiver);
1651 }
1652}
1653
1654
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001655void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1656 Register scratch) {
1657 if (emit_debug_code()) {
1658 Label done_checking;
1659 AssertNotSmi(object);
1660 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1661 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1662 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1663 Assert(eq, kExpectedUndefinedOrCell);
1664 Bind(&done_checking);
1665 }
1666}
1667
1668
1669void MacroAssembler::AssertString(Register object) {
1670 if (emit_debug_code()) {
1671 UseScratchRegisterScope temps(this);
1672 Register temp = temps.AcquireX();
1673 STATIC_ASSERT(kSmiTag == 0);
1674 Tst(object, kSmiTagMask);
1675 Check(ne, kOperandIsASmiAndNotAString);
1676 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1677 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1678 Check(lo, kOperandIsNotAString);
1679 }
1680}
1681
1682
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001683void MacroAssembler::AssertPositiveOrZero(Register value) {
1684 if (emit_debug_code()) {
1685 Label done;
1686 int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1687 Tbz(value, sign_bit, &done);
1688 Abort(kUnexpectedNegativeValue);
1689 Bind(&done);
1690 }
1691}
1692
Ben Murdoch097c5b22016-05-18 11:27:45 +01001693void MacroAssembler::AssertNumber(Register value) {
1694 if (emit_debug_code()) {
1695 Label done;
1696 JumpIfSmi(value, &done);
1697 JumpIfHeapNumber(value, &done);
1698 Abort(kOperandIsNotANumber);
1699 Bind(&done);
1700 }
1701}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001702
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001703void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1704 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1705 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1706}
1707
1708
1709void MacroAssembler::TailCallStub(CodeStub* stub) {
1710 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1711}
1712
1713
1714void MacroAssembler::CallRuntime(const Runtime::Function* f,
1715 int num_arguments,
1716 SaveFPRegsMode save_doubles) {
1717 // All arguments must be on the stack before this function is called.
1718 // x0 holds the return value after the call.
1719
1720 // Check that the number of arguments matches what the function expects.
1721 // If f->nargs is -1, the function can accept a variable number of arguments.
1722 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1723
1724 // Place the necessary arguments.
1725 Mov(x0, num_arguments);
1726 Mov(x1, ExternalReference(f, isolate()));
1727
1728 CEntryStub stub(isolate(), 1, save_doubles);
1729 CallStub(&stub);
1730}
1731
1732
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001733void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1734 int num_arguments) {
1735 Mov(x0, num_arguments);
1736 Mov(x1, ext);
1737
1738 CEntryStub stub(isolate(), 1);
1739 CallStub(&stub);
1740}
1741
1742
1743void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1744 Mov(x1, builtin);
1745 CEntryStub stub(isolate(), 1);
1746 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1747}
1748
1749
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001750void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1751 const Runtime::Function* function = Runtime::FunctionForId(fid);
1752 DCHECK_EQ(1, function->result_size);
1753 if (function->nargs >= 0) {
1754 // TODO(1236192): Most runtime routines don't need the number of
1755 // arguments passed in because it is constant. At some point we
1756 // should remove this need and make the runtime routine entry code
1757 // smarter.
1758 Mov(x0, function->nargs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001759 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001760 JumpToExternalReference(ExternalReference(fid, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001761}
1762
1763
1764void MacroAssembler::InitializeNewString(Register string,
1765 Register length,
1766 Heap::RootListIndex map_index,
1767 Register scratch1,
1768 Register scratch2) {
1769 DCHECK(!AreAliased(string, length, scratch1, scratch2));
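  // Write the map, the Smi-tagged length and the empty hash field into the
  // string object.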
1770 LoadRoot(scratch2, map_index);
1771 SmiTag(scratch1, length);
1772 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1773
1774 Mov(scratch2, String::kEmptyHashField);
1775 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1776 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1777}
1778
1779
1780int MacroAssembler::ActivationFrameAlignment() {
1781#if V8_HOST_ARCH_ARM64
1782 // Running on the real platform. Use the alignment as mandated by the local
1783 // environment.
1784 // Note: This will break if we ever start generating snapshots on one ARM
1785 // platform for another ARM platform with a different alignment.
1786 return base::OS::ActivationFrameAlignment();
1787#else // V8_HOST_ARCH_ARM64
1788 // If we are using the simulator then we should always align to the expected
1789 // alignment. As the simulator is used to generate snapshots we do not know
1790 // if the target platform will need alignment, so this is controlled from a
1791 // flag.
1792 return FLAG_sim_stack_alignment;
1793#endif // V8_HOST_ARCH_ARM64
1794}
1795
1796
1797void MacroAssembler::CallCFunction(ExternalReference function,
1798 int num_of_reg_args) {
1799 CallCFunction(function, num_of_reg_args, 0);
1800}
1801
1802
1803void MacroAssembler::CallCFunction(ExternalReference function,
1804 int num_of_reg_args,
1805 int num_of_double_args) {
1806 UseScratchRegisterScope temps(this);
1807 Register temp = temps.AcquireX();
1808 Mov(temp, function);
1809 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1810}
1811
1812
1813void MacroAssembler::CallCFunction(Register function,
1814 int num_of_reg_args,
1815 int num_of_double_args) {
1816 DCHECK(has_frame());
1817 // We can pass 8 integer arguments in registers. If we need to pass more than
1818 // that, we'll need to implement support for passing them on the stack.
1819 DCHECK(num_of_reg_args <= 8);
1820
1821 // If we're passing doubles, we're limited to the following prototypes
1822 // (defined by ExternalReference::Type):
1823 // BUILTIN_COMPARE_CALL: int f(double, double)
1824 // BUILTIN_FP_FP_CALL: double f(double, double)
1825 // BUILTIN_FP_CALL: double f(double)
1826 // BUILTIN_FP_INT_CALL: double f(double, int)
1827 if (num_of_double_args > 0) {
1828 DCHECK(num_of_reg_args <= 1);
1829 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1830 }
1831
1832
1833 // If the stack pointer is not csp, we need to derive an aligned csp from the
1834 // current stack pointer.
1835 const Register old_stack_pointer = StackPointer();
1836 if (!csp.Is(old_stack_pointer)) {
1837 AssertStackConsistency();
1838
1839 int sp_alignment = ActivationFrameAlignment();
1840 // The ABI mandates at least 16-byte alignment.
1841 DCHECK(sp_alignment >= 16);
1842 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1843
1844 // The current stack pointer is a callee saved register, and is preserved
1845 // across the call.
1846 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1847
1848 // Align and synchronize the system stack pointer with jssp.
1849 Bic(csp, old_stack_pointer, sp_alignment - 1);
1850 SetStackPointer(csp);
1851 }
1852
1853 // Call directly. The function called cannot cause a GC, or allow preemption,
1854 // so the return address in the link register stays correct.
1855 Call(function);
1856
1857 if (!csp.Is(old_stack_pointer)) {
1858 if (emit_debug_code()) {
1859 // Because the stack pointer must be aligned on a 16-byte boundary, the
1860 // aligned csp can be up to 12 bytes below the jssp. This is the case
1861 // where we only pushed one W register on top of an aligned jssp.
1862 UseScratchRegisterScope temps(this);
1863 Register temp = temps.AcquireX();
1864 DCHECK(ActivationFrameAlignment() == 16);
1865 Sub(temp, csp, old_stack_pointer);
1866 // We want temp <= 0 && temp >= -12.
1867 Cmp(temp, 0);
1868 Ccmp(temp, -12, NFlag, le);
1869 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1870 }
1871 SetStackPointer(old_stack_pointer);
1872 }
1873}
1874
1875
1876void MacroAssembler::Jump(Register target) {
1877 Br(target);
1878}
1879
1880
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001881void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
1882 Condition cond) {
1883 if (cond == nv) return;
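  // The jump is performed indirectly via a scratch register. For conditional
  // jumps, branch over the Mov/Br pair when the condition does not hold.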
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001884 UseScratchRegisterScope temps(this);
1885 Register temp = temps.AcquireX();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001886 Label done;
1887 if (cond != al) B(NegateCondition(cond), &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001888 Mov(temp, Operand(target, rmode));
1889 Br(temp);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001890 Bind(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001891}
1892
1893
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001894void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
1895 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001896 DCHECK(!RelocInfo::IsCodeTarget(rmode));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001897 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001898}
1899
1900
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001901void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1902 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001903 DCHECK(RelocInfo::IsCodeTarget(rmode));
1904 AllowDeferredHandleDereference embedding_raw_address;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001905 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001906}
1907
1908
1909void MacroAssembler::Call(Register target) {
1910 BlockPoolsScope scope(this);
1911#ifdef DEBUG
1912 Label start_call;
1913 Bind(&start_call);
1914#endif
1915
1916 Blr(target);
1917
1918#ifdef DEBUG
1919 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1920#endif
1921}
1922
1923
1924void MacroAssembler::Call(Label* target) {
1925 BlockPoolsScope scope(this);
1926#ifdef DEBUG
1927 Label start_call;
1928 Bind(&start_call);
1929#endif
1930
1931 Bl(target);
1932
1933#ifdef DEBUG
1934 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1935#endif
1936}
1937
1938
1939// MacroAssembler::CallSize is sensitive to changes in this function, as it
1940// needs to know how many instructions are used to branch to the target.
1941void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1942 BlockPoolsScope scope(this);
1943#ifdef DEBUG
1944 Label start_call;
1945 Bind(&start_call);
1946#endif
1947 // Statement positions are expected to be recorded when the target
1948 // address is loaded.
1949 positions_recorder()->WriteRecordedPositions();
1950
1951 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1952 DCHECK(rmode != RelocInfo::NONE32);
1953
1954 UseScratchRegisterScope temps(this);
1955 Register temp = temps.AcquireX();
1956
1957 if (rmode == RelocInfo::NONE64) {
1958 // Addresses are 48 bits so we never need to load the upper 16 bits.
1959 uint64_t imm = reinterpret_cast<uint64_t>(target);
1960 // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
1961 DCHECK(((imm >> 48) & 0xffff) == 0);
1962 movz(temp, (imm >> 0) & 0xffff, 0);
1963 movk(temp, (imm >> 16) & 0xffff, 16);
1964 movk(temp, (imm >> 32) & 0xffff, 32);
1965 } else {
1966 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1967 }
1968 Blr(temp);
1969#ifdef DEBUG
1970 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1971#endif
1972}
1973
1974
1975void MacroAssembler::Call(Handle<Code> code,
1976 RelocInfo::Mode rmode,
1977 TypeFeedbackId ast_id) {
1978#ifdef DEBUG
1979 Label start_call;
1980 Bind(&start_call);
1981#endif
1982
1983 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
1984 SetRecordedAstId(ast_id);
1985 rmode = RelocInfo::CODE_TARGET_WITH_ID;
1986 }
1987
1988 AllowDeferredHandleDereference embedding_raw_address;
1989 Call(reinterpret_cast<Address>(code.location()), rmode);
1990
1991#ifdef DEBUG
1992 // Check the size of the code generated.
1993 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
1994#endif
1995}
1996
1997
1998int MacroAssembler::CallSize(Register target) {
1999 USE(target);
2000 return kInstructionSize;
2001}
2002
2003
2004int MacroAssembler::CallSize(Label* target) {
2005 USE(target);
2006 return kInstructionSize;
2007}
2008
2009
2010int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2011 USE(target);
2012
2013 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2014 DCHECK(rmode != RelocInfo::NONE32);
2015
2016 if (rmode == RelocInfo::NONE64) {
2017 return kCallSizeWithoutRelocation;
2018 } else {
2019 return kCallSizeWithRelocation;
2020 }
2021}
2022
2023
2024int MacroAssembler::CallSize(Handle<Code> code,
2025 RelocInfo::Mode rmode,
2026 TypeFeedbackId ast_id) {
2027 USE(code);
2028 USE(ast_id);
2029
2030 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2031 DCHECK(rmode != RelocInfo::NONE32);
2032
2033 if (rmode == RelocInfo::NONE64) {
2034 return kCallSizeWithoutRelocation;
2035 } else {
2036 return kCallSizeWithRelocation;
2037 }
2038}
2039
2040
2041void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2042 SmiCheckType smi_check_type) {
2043 Label on_not_heap_number;
2044
2045 if (smi_check_type == DO_SMI_CHECK) {
2046 JumpIfSmi(object, &on_not_heap_number);
2047 }
2048
2049 AssertNotSmi(object);
2050
2051 UseScratchRegisterScope temps(this);
2052 Register temp = temps.AcquireX();
2053 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2054 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2055
2056 Bind(&on_not_heap_number);
2057}
2058
2059
2060void MacroAssembler::JumpIfNotHeapNumber(Register object,
2061 Label* on_not_heap_number,
2062 SmiCheckType smi_check_type) {
2063 if (smi_check_type == DO_SMI_CHECK) {
2064 JumpIfSmi(object, on_not_heap_number);
2065 }
2066
2067 AssertNotSmi(object);
2068
2069 UseScratchRegisterScope temps(this);
2070 Register temp = temps.AcquireX();
2071 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2072 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2073}
2074
2075
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002076void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2077 FPRegister value,
2078 FPRegister scratch_d,
2079 Label* on_successful_conversion,
2080 Label* on_failed_conversion) {
2081 // Convert to an int and back again, then compare with the original value.
2082 Fcvtzs(as_int, value);
2083 Scvtf(scratch_d, as_int);
2084 Fcmp(value, scratch_d);
2085
2086 if (on_successful_conversion) {
2087 B(on_successful_conversion, eq);
2088 }
2089 if (on_failed_conversion) {
2090 B(on_failed_conversion, ne);
2091 }
2092}
2093
2094
2095void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2096 UseScratchRegisterScope temps(this);
2097 Register temp = temps.AcquireX();
2098 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
2099 // cause overflow.
2100 Fmov(temp, input);
2101 Cmp(temp, 1);
2102}
2103
2104
2105void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2106 Label* on_negative_zero) {
2107 TestForMinusZero(input);
2108 B(vs, on_negative_zero);
2109}
2110
2111
2112void MacroAssembler::JumpIfMinusZero(Register input,
2113 Label* on_negative_zero) {
2114 DCHECK(input.Is64Bits());
2115 // Floating point value is in an integer register. Detect -0.0 by subtracting
2116 // 1 (cmp), which will cause overflow.
2117 Cmp(input, 1);
2118 B(vs, on_negative_zero);
2119}
2120
2121
2122void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2123 // Clamp the value to [0..255].
2124 Cmp(input.W(), Operand(input.W(), UXTB));
2125 // If input < input & 0xff, it must be < 0, so saturate to 0.
2126 Csel(output.W(), wzr, input.W(), lt);
2127 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2128 Csel(output.W(), output.W(), 255, le);
2129}
2130
2131
2132void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2133 ClampInt32ToUint8(in_out, in_out);
2134}
2135
2136
2137void MacroAssembler::ClampDoubleToUint8(Register output,
2138 DoubleRegister input,
2139 DoubleRegister dbl_scratch) {
2140 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2141 // - Inputs lower than 0 (including -infinity) produce 0.
2142 // - Inputs higher than 255 (including +infinity) produce 255.
2143 // Also, it seems that PIXEL types use round-to-nearest rather than
2144 // round-towards-zero.
2145
2146 // Squash +infinity before the conversion, since Fcvtnu will normally
2147 // convert it to 0.
2148 Fmov(dbl_scratch, 255);
2149 Fmin(dbl_scratch, dbl_scratch, input);
2150
2151 // Convert double to unsigned integer. Values less than zero become zero.
2152 // Values greater than 255 have already been clamped to 255.
2153 Fcvtnu(output, dbl_scratch);
2154}
2155
2156
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002157void MacroAssembler::CopyBytes(Register dst,
2158 Register src,
2159 Register length,
2160 Register scratch,
2161 CopyHint hint) {
2162 UseScratchRegisterScope temps(this);
2163 Register tmp1 = temps.AcquireX();
2164 Register tmp2 = temps.AcquireX();
2165 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2166 DCHECK(!AreAliased(src, dst, csp));
2167
2168 if (emit_debug_code()) {
2169 // Check copy length.
2170 Cmp(length, 0);
2171 Assert(ge, kUnexpectedNegativeValue);
2172
2173 // Check src and dst buffers don't overlap.
2174 Add(scratch, src, length); // Calculate end of src buffer.
2175 Cmp(scratch, dst);
2176 Add(scratch, dst, length); // Calculate end of dst buffer.
2177 Ccmp(scratch, src, ZFlag, gt);
2178 Assert(le, kCopyBuffersOverlap);
2179 }
2180
2181 Label short_copy, short_loop, bulk_loop, done;
2182
2183 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2184 Register bulk_length = scratch;
2185 int pair_size = 2 * kXRegSize;
2186 int pair_mask = pair_size - 1;
2187
2188 Bic(bulk_length, length, pair_mask);
2189 Cbz(bulk_length, &short_copy);
2190 Bind(&bulk_loop);
2191 Sub(bulk_length, bulk_length, pair_size);
2192 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2193 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2194 Cbnz(bulk_length, &bulk_loop);
2195
2196 And(length, length, pair_mask);
2197 }
2198
2199 Bind(&short_copy);
2200 Cbz(length, &done);
2201 Bind(&short_loop);
2202 Sub(length, length, 1);
2203 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2204 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2205 Cbnz(length, &short_loop);
2206
2207
2208 Bind(&done);
2209}
2210
2211
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002212void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2213 Register end_address,
2214 Register filler) {
2215 DCHECK(!current_address.Is(csp));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002216 UseScratchRegisterScope temps(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002217 Register distance_in_words = temps.AcquireX();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002218 Label done;
2219
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002220 // Calculate the distance. If it's <= zero then there's nothing to do.
2221 Subs(distance_in_words, end_address, current_address);
2222 B(le, &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002223
2224 // There's at least one field to fill, so do this unconditionally.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002225 Str(filler, MemOperand(current_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002226
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002227  // If distance_in_words is an odd number of words, advance current_address
2228  // by one word so that the pairs loop below does not overwrite the field
2229  // that was stored above.
2230 And(distance_in_words, distance_in_words, kPointerSize);
2231 Add(current_address, current_address, distance_in_words);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002232
2233 // Store filler to memory in pairs.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002234 Label loop, entry;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002235 B(&entry);
2236 Bind(&loop);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002237 Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002238 Bind(&entry);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002239 Cmp(current_address, end_address);
2240 B(lo, &loop);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002241
2242 Bind(&done);
2243}
2244
2245
2246void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2247 Register first, Register second, Register scratch1, Register scratch2,
2248 Label* failure, SmiCheckType smi_check) {
2249 if (smi_check == DO_SMI_CHECK) {
2250 JumpIfEitherSmi(first, second, failure);
2251 } else if (emit_debug_code()) {
2252 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2253 Label not_smi;
2254 JumpIfEitherSmi(first, second, NULL, &not_smi);
2255
2256 // At least one input is a smi, but the flags indicated a smi check wasn't
2257 // needed.
2258 Abort(kUnexpectedSmi);
2259
2260 Bind(&not_smi);
2261 }
2262
2263 // Test that both first and second are sequential one-byte strings.
2264 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2265 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2266 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2267 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2268
2269 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2270 scratch2, failure);
2271}
2272
2273
2274void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2275 Register first, Register second, Register scratch1, Register scratch2,
2276 Label* failure) {
2277 DCHECK(!AreAliased(scratch1, second));
2278 DCHECK(!AreAliased(scratch1, scratch2));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002279 const int kFlatOneByteStringMask =
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002280 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002281 const int kFlatOneByteStringTag =
2282 kStringTag | kOneByteStringTag | kSeqStringTag;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002283 And(scratch1, first, kFlatOneByteStringMask);
2284 And(scratch2, second, kFlatOneByteStringMask);
2285 Cmp(scratch1, kFlatOneByteStringTag);
2286 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2287 B(ne, failure);
2288}
2289
2290
2291void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2292 Register scratch,
2293 Label* failure) {
2294 const int kFlatOneByteStringMask =
2295 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2296 const int kFlatOneByteStringTag =
2297 kStringTag | kOneByteStringTag | kSeqStringTag;
2298 And(scratch, type, kFlatOneByteStringMask);
2299 Cmp(scratch, kFlatOneByteStringTag);
2300 B(ne, failure);
2301}
2302
2303
2304void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2305 Register first, Register second, Register scratch1, Register scratch2,
2306 Label* failure) {
2307 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2308 const int kFlatOneByteStringMask =
2309 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2310 const int kFlatOneByteStringTag =
2311 kStringTag | kOneByteStringTag | kSeqStringTag;
2312 And(scratch1, first, kFlatOneByteStringMask);
2313 And(scratch2, second, kFlatOneByteStringMask);
2314 Cmp(scratch1, kFlatOneByteStringTag);
2315 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2316 B(ne, failure);
2317}
2318
2319
2320void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2321 Label* not_unique_name) {
2322 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2323 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2324 // continue
2325 // } else {
2326 // goto not_unique_name
2327 // }
2328 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2329 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2330 B(ne, not_unique_name);
2331}
2332
2333
2334void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2335 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002336 Label* done,
2337 InvokeFlag flag,
2338 bool* definitely_mismatches,
2339 const CallWrapper& call_wrapper) {
2340 bool definitely_matches = false;
2341 *definitely_mismatches = false;
2342 Label regular_invoke;
2343
2344 // Check whether the expected and actual arguments count match. If not,
2345 // setup registers according to contract with ArgumentsAdaptorTrampoline:
2346 // x0: actual arguments count.
2347 // x1: function (passed through to callee).
2348 // x2: expected arguments count.
2349
2350 // The code below is made a lot easier because the calling code already sets
2351 // up actual and expected registers according to the contract if values are
2352 // passed in registers.
2353 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2354 DCHECK(expected.is_immediate() || expected.reg().is(x2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002355
2356 if (expected.is_immediate()) {
2357 DCHECK(actual.is_immediate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002358 Mov(x0, actual.immediate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002359 if (expected.immediate() == actual.immediate()) {
2360 definitely_matches = true;
2361
2362 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002363 if (expected.immediate() ==
2364 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2365 // Don't worry about adapting arguments for builtins that
2366        // don't want that done. Skip adaptation code by making it look
2367 // like we have a match between expected and actual number of
2368 // arguments.
2369 definitely_matches = true;
2370 } else {
2371 *definitely_mismatches = true;
2372 // Set up x2 for the argument adaptor.
2373 Mov(x2, expected.immediate());
2374 }
2375 }
2376
2377 } else { // expected is a register.
2378 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2379 : Operand(actual.reg());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002380 Mov(x0, actual_op);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002381 // If actual == expected perform a regular invocation.
2382 Cmp(expected.reg(), actual_op);
2383 B(eq, &regular_invoke);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002384 }
2385
2386 // If the argument counts may mismatch, generate a call to the argument
2387 // adaptor.
2388 if (!definitely_matches) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002389 Handle<Code> adaptor =
2390 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2391 if (flag == CALL_FUNCTION) {
2392 call_wrapper.BeforeCall(CallSize(adaptor));
2393 Call(adaptor);
2394 call_wrapper.AfterCall();
2395 if (!*definitely_mismatches) {
2396 // If the arg counts don't match, no extra code is emitted by
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002397 // MAsm::InvokeFunctionCode and we can just fall through.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002398 B(done);
2399 }
2400 } else {
2401 Jump(adaptor, RelocInfo::CODE_TARGET);
2402 }
2403 }
2404 Bind(&regular_invoke);
2405}
2406
2407
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002408void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
2409 const ParameterCount& expected,
2410 const ParameterCount& actual) {
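  // If the debugger has requested stepping, call the runtime so it can
  // prepare the function for stepping in. Argument-count registers are
  // Smi-tagged and preserved across the runtime call.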
2411 Label skip_flooding;
2412 ExternalReference step_in_enabled =
2413 ExternalReference::debug_step_in_enabled_address(isolate());
2414 Mov(x4, Operand(step_in_enabled));
2415 ldrb(x4, MemOperand(x4));
2416 CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
2417 {
2418 FrameScope frame(this,
2419 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2420 if (expected.is_reg()) {
2421 SmiTag(expected.reg());
2422 Push(expected.reg());
2423 }
2424 if (actual.is_reg()) {
2425 SmiTag(actual.reg());
2426 Push(actual.reg());
2427 }
2428 if (new_target.is_valid()) {
2429 Push(new_target);
2430 }
2431 Push(fun);
2432 Push(fun);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002433 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002434 Pop(fun);
2435 if (new_target.is_valid()) {
2436 Pop(new_target);
2437 }
2438 if (actual.is_reg()) {
2439 Pop(actual.reg());
2440 SmiUntag(actual.reg());
2441 }
2442 if (expected.is_reg()) {
2443 Pop(expected.reg());
2444 SmiUntag(expected.reg());
2445 }
2446 }
2447 bind(&skip_flooding);
2448}
2449
2450
2451void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2452 const ParameterCount& expected,
2453 const ParameterCount& actual,
2454 InvokeFlag flag,
2455 const CallWrapper& call_wrapper) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002456 // You can't call a function without a valid frame.
2457 DCHECK(flag == JUMP_FUNCTION || has_frame());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002458 DCHECK(function.is(x1));
2459 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2460
2461 FloodFunctionIfStepping(function, new_target, expected, actual);
2462
2463 // Clear the new.target register if not given.
2464 if (!new_target.is_valid()) {
2465 LoadRoot(x3, Heap::kUndefinedValueRootIndex);
2466 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002467
2468 Label done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002469 bool definitely_mismatches = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002470 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
2471 call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002472
2473 // If we are certain that actual != expected, then we know InvokePrologue will
2474 // have handled the call through the argument adaptor mechanism.
2475 // The called function expects the call kind in x5.
2476 if (!definitely_mismatches) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002477 // We call indirectly through the code field in the function to
2478 // allow recompilation to take effect without changing any of the
2479 // call sites.
2480 Register code = x4;
2481 Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002482 if (flag == CALL_FUNCTION) {
2483 call_wrapper.BeforeCall(CallSize(code));
2484 Call(code);
2485 call_wrapper.AfterCall();
2486 } else {
2487 DCHECK(flag == JUMP_FUNCTION);
2488 Jump(code);
2489 }
2490 }
2491
2492 // Continue here if InvokePrologue does handle the invocation due to
2493 // mismatched parameter counts.
2494 Bind(&done);
2495}
2496
2497
2498void MacroAssembler::InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002499 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002500 const ParameterCount& actual,
2501 InvokeFlag flag,
2502 const CallWrapper& call_wrapper) {
2503 // You can't call a function without a valid frame.
2504 DCHECK(flag == JUMP_FUNCTION || has_frame());
2505
2506 // Contract with called JS functions requires that function is passed in x1.
2507 // (See FullCodeGenerator::Generate().)
2508 DCHECK(function.is(x1));
2509
2510 Register expected_reg = x2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002511
2512 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2513 // The number of arguments is stored as an int32_t, and -1 is a marker
2514 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2515 // extension to correctly handle it.
2516 Ldr(expected_reg, FieldMemOperand(function,
2517 JSFunction::kSharedFunctionInfoOffset));
2518 Ldrsw(expected_reg,
2519 FieldMemOperand(expected_reg,
2520 SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002521
2522 ParameterCount expected(expected_reg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002523 InvokeFunctionCode(function, new_target, expected, actual, flag,
2524 call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002525}
2526
2527
2528void MacroAssembler::InvokeFunction(Register function,
2529 const ParameterCount& expected,
2530 const ParameterCount& actual,
2531 InvokeFlag flag,
2532 const CallWrapper& call_wrapper) {
2533 // You can't call a function without a valid frame.
2534 DCHECK(flag == JUMP_FUNCTION || has_frame());
2535
2536 // Contract with called JS functions requires that function is passed in x1.
2537 // (See FullCodeGenerator::Generate().)
2538 DCHECK(function.Is(x1));
2539
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002540 // Set up the context.
2541 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2542
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002543 InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002544}
2545
2546
2547void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2548 const ParameterCount& expected,
2549 const ParameterCount& actual,
2550 InvokeFlag flag,
2551 const CallWrapper& call_wrapper) {
2552 // Contract with called JS functions requires that function is passed in x1.
2553 // (See FullCodeGenerator::Generate().)
2554 __ LoadObject(x1, function);
2555 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2556}
2557
2558
2559void MacroAssembler::TryConvertDoubleToInt64(Register result,
2560 DoubleRegister double_input,
2561 Label* done) {
2562 // Try to convert with an FPU convert instruction. It's trivial to compute
2563 // the modulo operation on an integer register so we convert to a 64-bit
2564 // integer.
2565 //
2566 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2567 // when the double is out of range. NaNs and infinities will be converted to 0
2568 // (as ECMA-262 requires).
2569 Fcvtzs(result.X(), double_input);
2570
2571 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2572 // representable using a double, so if the result is one of those then we know
2573  // that saturation occurred, and we need to handle the conversion manually.
2574 //
2575 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2576 // 1 will cause signed overflow.
2577 Cmp(result.X(), 1);
2578 Ccmp(result.X(), -1, VFlag, vc);
2579
2580 B(vc, done);
2581}
2582
2583
2584void MacroAssembler::TruncateDoubleToI(Register result,
2585 DoubleRegister double_input) {
2586 Label done;
2587
2588 // Try to convert the double to an int64. If successful, the bottom 32 bits
2589 // contain our truncated int32 result.
2590 TryConvertDoubleToInt64(result, double_input, &done);
2591
2592 const Register old_stack_pointer = StackPointer();
2593 if (csp.Is(old_stack_pointer)) {
2594 // This currently only happens during compiler-unittest. If it arises
2595 // during regular code generation the DoubleToI stub should be updated to
2596 // cope with csp and have an extra parameter indicating which stack pointer
2597 // it should use.
2598    Push(jssp, xzr);  // Push xzr to maintain csp's required 16-byte alignment.
2599 Mov(jssp, csp);
2600 SetStackPointer(jssp);
2601 }
2602
2603  // If we fell through, the inline version didn't succeed, so call the stub instead.
2604 Push(lr, double_input);
2605
2606 DoubleToIStub stub(isolate(),
2607 jssp,
2608 result,
2609 0,
2610 true, // is_truncating
2611 true); // skip_fastpath
2612 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2613
2614 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2615 Pop(xzr, lr); // xzr to drop the double input on the stack.
2616
2617 if (csp.Is(old_stack_pointer)) {
2618 Mov(csp, jssp);
2619 SetStackPointer(csp);
2620 AssertStackConsistency();
2621 Pop(xzr, jssp);
2622 }
2623
2624 Bind(&done);
2625}
2626
2627
2628void MacroAssembler::TruncateHeapNumberToI(Register result,
2629 Register object) {
2630 Label done;
2631 DCHECK(!result.is(object));
2632 DCHECK(jssp.Is(StackPointer()));
2633
2634 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2635
2636 // Try to convert the double to an int64. If successful, the bottom 32 bits
2637 // contain our truncated int32 result.
2638 TryConvertDoubleToInt64(result, fp_scratch, &done);
2639
2640  // If we fell through, the inline version didn't succeed, so call the stub instead.
2641 Push(lr);
2642 DoubleToIStub stub(isolate(),
2643 object,
2644 result,
2645 HeapNumber::kValueOffset - kHeapObjectTag,
2646 true, // is_truncating
2647 true); // skip_fastpath
2648 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2649 Pop(lr);
2650
2651 Bind(&done);
2652}
2653
2654
2655void MacroAssembler::StubPrologue() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002656 UseScratchRegisterScope temps(this);
2657 Register temp = temps.AcquireX();
2658 __ Mov(temp, Smi::FromInt(StackFrame::STUB));
2659 // Compiled stubs don't age, and so they don't need the predictable code
2660 // ageing sequence.
2661 __ Push(lr, fp, cp, temp);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002662 __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002663}
2664
2665
2666void MacroAssembler::Prologue(bool code_pre_aging) {
2667 if (code_pre_aging) {
2668 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2669 __ EmitCodeAgeSequence(stub);
2670 } else {
2671 __ EmitFrameSetupForCodeAgePatching();
2672 }
2673}
2674
2675
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002676void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
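  // Load the type feedback vector of the current function:
  //   function -> shared function info -> feedback vector.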
2677 Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2678 Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
2679 Ldr(vector,
2680 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
2681}
2682
2683
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002684void MacroAssembler::EnterFrame(StackFrame::Type type,
2685 bool load_constant_pool_pointer_reg) {
2686 // Out-of-line constant pool not implemented on arm64.
2687 UNREACHABLE();
2688}
2689
2690
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002691void MacroAssembler::EnterFrame(StackFrame::Type type) {
2692 DCHECK(jssp.Is(StackPointer()));
2693 UseScratchRegisterScope temps(this);
2694 Register type_reg = temps.AcquireX();
2695 Register code_reg = temps.AcquireX();
2696
2697 Push(lr, fp, cp);
2698 Mov(type_reg, Smi::FromInt(type));
2699 Mov(code_reg, Operand(CodeObject()));
2700 Push(type_reg, code_reg);
2701 // jssp[4] : lr
2702 // jssp[3] : fp
2703 // jssp[2] : cp
2704 // jssp[1] : type
2705 // jssp[0] : code object
2706
2707 // Adjust FP to point to saved FP.
2708 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2709}
2710
2711
2712void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2713 DCHECK(jssp.Is(StackPointer()));
2714 // Drop the execution stack down to the frame pointer and restore
2715 // the caller frame pointer and return address.
2716 Mov(jssp, fp);
2717 AssertStackConsistency();
2718 Pop(fp, lr);
2719}
2720
2721
2722void MacroAssembler::ExitFramePreserveFPRegs() {
2723 PushCPURegList(kCallerSavedFP);
2724}
2725
2726
2727void MacroAssembler::ExitFrameRestoreFPRegs() {
2728 // Read the registers from the stack without popping them. The stack pointer
2729 // will be reset as part of the unwinding process.
2730 CPURegList saved_fp_regs = kCallerSavedFP;
2731 DCHECK(saved_fp_regs.Count() % 2 == 0);
2732
2733 int offset = ExitFrameConstants::kLastExitFrameField;
2734 while (!saved_fp_regs.IsEmpty()) {
2735 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2736 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2737 offset -= 2 * kDRegSize;
2738 Ldp(dst1, dst0, MemOperand(fp, offset));
2739 }
2740}
2741
2742
2743void MacroAssembler::EnterExitFrame(bool save_doubles,
2744 const Register& scratch,
2745 int extra_space) {
2746 DCHECK(jssp.Is(StackPointer()));
2747
2748 // Set up the new stack frame.
2749 Mov(scratch, Operand(CodeObject()));
2750 Push(lr, fp);
2751 Mov(fp, StackPointer());
2752 Push(xzr, scratch);
2753 // fp[8]: CallerPC (lr)
2754 // fp -> fp[0]: CallerFP (old fp)
2755 // fp[-8]: Space reserved for SPOffset.
2756 // jssp -> fp[-16]: CodeObject()
2757 STATIC_ASSERT((2 * kPointerSize) ==
2758 ExitFrameConstants::kCallerSPDisplacement);
2759 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2760 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2761 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
2762 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2763
2764 // Save the frame pointer and context pointer in the top frame.
2765 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2766 isolate())));
2767 Str(fp, MemOperand(scratch));
2768 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2769 isolate())));
2770 Str(cp, MemOperand(scratch));
2771
2772 STATIC_ASSERT((-2 * kPointerSize) ==
2773 ExitFrameConstants::kLastExitFrameField);
2774 if (save_doubles) {
2775 ExitFramePreserveFPRegs();
2776 }
2777
2778 // Reserve space for the return address and for user requested memory.
2779 // We do this before aligning to make sure that we end up correctly
2780 // aligned with the minimum of wasted space.
2781 Claim(extra_space + 1, kXRegSize);
2782 // fp[8]: CallerPC (lr)
2783 // fp -> fp[0]: CallerFP (old fp)
2784 // fp[-8]: Space reserved for SPOffset.
2785 // fp[-16]: CodeObject()
2786 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2787 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2788 // jssp -> jssp[0]: Space reserved for the return address.
2789
2790 // Align and synchronize the system stack pointer with jssp.
2791 AlignAndSetCSPForFrame();
2792 DCHECK(csp.Is(StackPointer()));
2793
2794 // fp[8]: CallerPC (lr)
2795 // fp -> fp[0]: CallerFP (old fp)
2796 // fp[-8]: Space reserved for SPOffset.
2797 // fp[-16]: CodeObject()
2798 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2799 // csp[8]: Memory reserved for the caller if extra_space != 0.
2800 // Alignment padding, if necessary.
2801 // csp -> csp[0]: Space reserved for the return address.
2802
2803 // ExitFrame::GetStateForFramePointer expects to find the return address at
2804 // the memory address immediately below the pointer stored in SPOffset.
2805 // It is not safe to derive much else from SPOffset, because the size of the
2806 // padding can vary.
2807 Add(scratch, csp, kXRegSize);
2808 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2809}
2810
2811
2812// Leave the current exit frame.
2813void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2814 const Register& scratch,
2815 bool restore_context) {
2816 DCHECK(csp.Is(StackPointer()));
2817
2818 if (restore_doubles) {
2819 ExitFrameRestoreFPRegs();
2820 }
2821
2822 // Restore the context pointer from the top frame.
2823 if (restore_context) {
2824 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2825 isolate())));
2826 Ldr(cp, MemOperand(scratch));
2827 }
2828
2829 if (emit_debug_code()) {
2830 // Also emit debug code to clear the cp in the top frame.
2831 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2832 isolate())));
2833 Str(xzr, MemOperand(scratch));
2834 }
2835 // Clear the frame pointer from the top frame.
2836 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2837 isolate())));
2838 Str(xzr, MemOperand(scratch));
2839
2840 // Pop the exit frame.
2841 // fp[8]: CallerPC (lr)
2842 // fp -> fp[0]: CallerFP (old fp)
2843 // fp[...]: The rest of the frame.
2844 Mov(jssp, fp);
2845 SetStackPointer(jssp);
2846 AssertStackConsistency();
2847 Pop(fp, lr);
2848}
2849
2850
2851void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2852 Register scratch1, Register scratch2) {
2853 if (FLAG_native_code_counters && counter->Enabled()) {
2854 Mov(scratch1, value);
2855 Mov(scratch2, ExternalReference(counter));
2856 Str(scratch1, MemOperand(scratch2));
2857 }
2858}
2859
2860
2861void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2862 Register scratch1, Register scratch2) {
2863 DCHECK(value != 0);
2864 if (FLAG_native_code_counters && counter->Enabled()) {
2865 Mov(scratch2, ExternalReference(counter));
2866 Ldr(scratch1, MemOperand(scratch2));
2867 Add(scratch1, scratch1, value);
2868 Str(scratch1, MemOperand(scratch2));
2869 }
2870}
2871
2872
2873void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2874 Register scratch1, Register scratch2) {
2875 IncrementCounter(counter, -value, scratch1, scratch2);
2876}
2877
2878
2879void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2880 if (context_chain_length > 0) {
2881 // Move up the chain of contexts to the context containing the slot.
2882 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2883 for (int i = 1; i < context_chain_length; i++) {
2884 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2885 }
2886 } else {
2887 // Slot is in the current function context. Move it into the
2888 // destination register in case we store into it (the write barrier
2889 // cannot be allowed to destroy the context in cp).
2890 Mov(dst, cp);
2891 }
2892}
2893
2894
2895void MacroAssembler::DebugBreak() {
2896 Mov(x0, 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002897 Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002898 CEntryStub ces(isolate(), 1);
2899 DCHECK(AllowThisStubCall(&ces));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002900 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002901}
2902
2903
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002904void MacroAssembler::PushStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002905 DCHECK(jssp.Is(StackPointer()));
2906 // Adjust this code if the asserts don't hold.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002907 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002908 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002909
2910 // For the JSEntry handler, we must preserve the live registers x0-x4.
2911 // (See JSEntryStub::GenerateBody().)
2912
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002913 // Link the current handler as the next handler.
2914 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
2915 Ldr(x10, MemOperand(x11));
2916 Push(x10);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002917
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002918 // Set this new handler as the current one.
2919 Str(jssp, MemOperand(x11));
2920}
2921
2922
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002923void MacroAssembler::PopStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002924 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2925 Pop(x10);
2926 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
2927 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
2928 Str(x10, MemOperand(x11));
2929}
2930
2931
2932void MacroAssembler::Allocate(int object_size,
2933 Register result,
2934 Register scratch1,
2935 Register scratch2,
2936 Label* gc_required,
2937 AllocationFlags flags) {
2938 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2939 if (!FLAG_inline_new) {
2940 if (emit_debug_code()) {
2941 // Trash the registers to simulate an allocation failure.
2942 // We apply salt to the original zap value to easily spot the values.
2943 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
2944 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
2945 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
2946 }
2947 B(gc_required);
2948 return;
2949 }
2950
2951 UseScratchRegisterScope temps(this);
2952 Register scratch3 = temps.AcquireX();
2953
2954 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
2955 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
2956
2957 // Make object size into bytes.
2958 if ((flags & SIZE_IN_WORDS) != 0) {
2959 object_size *= kPointerSize;
2960 }
2961 DCHECK(0 == (object_size & kObjectAlignmentMask));
2962
2963 // Check relative positions of allocation top and limit addresses.
2964 // The values must be adjacent in memory to allow the use of LDP.
2965 ExternalReference heap_allocation_top =
2966 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2967 ExternalReference heap_allocation_limit =
2968 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2969 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
2970 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
2971 DCHECK((limit - top) == kPointerSize);
2972
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002973 // Set up allocation top address and allocation limit registers.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002974 Register top_address = scratch1;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002975 Register alloc_limit = scratch2;
2976 Register result_end = scratch3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002977 Mov(top_address, Operand(heap_allocation_top));
2978
2979 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002980 // Load allocation top into result and allocation limit into alloc_limit.
2981 Ldp(result, alloc_limit, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002982 } else {
2983 if (emit_debug_code()) {
2984 // Assert that result actually contains top on entry.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002985 Ldr(alloc_limit, MemOperand(top_address));
2986 Cmp(result, alloc_limit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002987 Check(eq, kUnexpectedAllocationTop);
2988 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002989 // Load allocation limit. Result already contains allocation top.
2990 Ldr(alloc_limit, MemOperand(top_address, limit - top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002991 }
2992
2993 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
2994 // the same alignment on ARM64.
2995 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
2996
2997 // Calculate new top and bail out if new space is exhausted.
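  // Adds sets the carry flag if the addition overflows. In that case the Ccmp
  // forces the flags to 'C', so the following 'hi' branch bails out to
  // gc_required; otherwise 'hi' means the new top exceeds the allocation limit.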
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002998 Adds(result_end, result, object_size);
2999 Ccmp(result_end, alloc_limit, CFlag, cc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003000 B(hi, gc_required);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003001 Str(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003002
3003 // Tag the object if requested.
3004 if ((flags & TAG_OBJECT) != 0) {
3005 ObjectTag(result, result);
3006 }
3007}
3008
3009
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003010void MacroAssembler::Allocate(Register object_size, Register result,
3011 Register result_end, Register scratch,
3012 Label* gc_required, AllocationFlags flags) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003013 if (!FLAG_inline_new) {
3014 if (emit_debug_code()) {
3015 // Trash the registers to simulate an allocation failure.
3016 // We apply salt to the original zap value to easily spot the values.
3017 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003018 Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
3019 Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003020 }
3021 B(gc_required);
3022 return;
3023 }
3024
3025 UseScratchRegisterScope temps(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003026 Register scratch2 = temps.AcquireX();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003027
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003028 // |object_size| and |result_end| may overlap, other registers must not.
3029 DCHECK(!AreAliased(object_size, result, scratch, scratch2));
3030 DCHECK(!AreAliased(result_end, result, scratch, scratch2));
3031 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3032 result_end.Is64Bits());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003033
3034 // Check relative positions of allocation top and limit addresses.
3035 // The values must be adjacent in memory to allow the use of LDP.
3036 ExternalReference heap_allocation_top =
3037 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3038 ExternalReference heap_allocation_limit =
3039 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3040 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3041 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3042 DCHECK((limit - top) == kPointerSize);
3043
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003044 // Set up allocation top address and allocation limit registers.
3045 Register top_address = scratch;
3046 Register alloc_limit = scratch2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003047 Mov(top_address, heap_allocation_top);
3048
3049 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003050 // Load allocation top into result and allocation limit into alloc_limit.
3051 Ldp(result, alloc_limit, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003052 } else {
3053 if (emit_debug_code()) {
3054 // Assert that result actually contains top on entry.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003055 Ldr(alloc_limit, MemOperand(top_address));
3056 Cmp(result, alloc_limit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003057 Check(eq, kUnexpectedAllocationTop);
3058 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003059 // Load allocation limit. Result already contains allocation top.
3060 Ldr(alloc_limit, MemOperand(top_address, limit - top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003061 }
3062
3063 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3064 // the same alignment on ARM64.
3065 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3066
3067 // Calculate new top and bail out if new space is exhausted
3068 if ((flags & SIZE_IN_WORDS) != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003069 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003070 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003071 Adds(result_end, result, object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003072 }
3073
3074 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003075 Tst(result_end, kObjectAlignmentMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003076 Check(eq, kUnalignedAllocationInNewSpace);
3077 }
3078
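  // As above, bail out to gc_required if the size addition overflowed or the
  // new top exceeds the allocation limit.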
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003079 Ccmp(result_end, alloc_limit, CFlag, cc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003080 B(hi, gc_required);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003081 Str(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003082
3083 // Tag the object if requested.
3084 if ((flags & TAG_OBJECT) != 0) {
3085 ObjectTag(result, result);
3086 }
3087}
3088
3089
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003090void MacroAssembler::AllocateTwoByteString(Register result,
3091 Register length,
3092 Register scratch1,
3093 Register scratch2,
3094 Register scratch3,
3095 Label* gc_required) {
3096 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3097 // Calculate the number of bytes needed for the characters in the string while
3098 // observing object alignment.
3099 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3100 Add(scratch1, length, length); // Length in bytes, not chars.
3101 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3102 Bic(scratch1, scratch1, kObjectAlignmentMask);
3103
3104 // Allocate two-byte string in new space.
3105 Allocate(scratch1,
3106 result,
3107 scratch2,
3108 scratch3,
3109 gc_required,
3110 TAG_OBJECT);
3111
3112 // Set the map, length and hash field.
3113 InitializeNewString(result,
3114 length,
3115 Heap::kStringMapRootIndex,
3116 scratch1,
3117 scratch2);
3118}
3119
3120
3121void MacroAssembler::AllocateOneByteString(Register result, Register length,
3122 Register scratch1, Register scratch2,
3123 Register scratch3,
3124 Label* gc_required) {
3125 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3126 // Calculate the number of bytes needed for the characters in the string while
3127 // observing object alignment.
3128 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3129 STATIC_ASSERT(kCharSize == 1);
3130 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3131 Bic(scratch1, scratch1, kObjectAlignmentMask);
3132
3133 // Allocate one-byte string in new space.
3134 Allocate(scratch1,
3135 result,
3136 scratch2,
3137 scratch3,
3138 gc_required,
3139 TAG_OBJECT);
3140
3141 // Set the map, length and hash field.
3142 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3143 scratch1, scratch2);
3144}
3145
3146
3147void MacroAssembler::AllocateTwoByteConsString(Register result,
3148 Register length,
3149 Register scratch1,
3150 Register scratch2,
3151 Label* gc_required) {
3152 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3153 TAG_OBJECT);
3154
3155 InitializeNewString(result,
3156 length,
3157 Heap::kConsStringMapRootIndex,
3158 scratch1,
3159 scratch2);
3160}
3161
3162
3163void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3164 Register scratch1,
3165 Register scratch2,
3166 Label* gc_required) {
3167 Allocate(ConsString::kSize,
3168 result,
3169 scratch1,
3170 scratch2,
3171 gc_required,
3172 TAG_OBJECT);
3173
3174 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3175 scratch1, scratch2);
3176}
3177
3178
3179void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3180 Register length,
3181 Register scratch1,
3182 Register scratch2,
3183 Label* gc_required) {
3184 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3185 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3186 TAG_OBJECT);
3187
3188 InitializeNewString(result,
3189 length,
3190 Heap::kSlicedStringMapRootIndex,
3191 scratch1,
3192 scratch2);
3193}
3194
3195
3196void MacroAssembler::AllocateOneByteSlicedString(Register result,
3197 Register length,
3198 Register scratch1,
3199 Register scratch2,
3200 Label* gc_required) {
3201 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3202 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3203 TAG_OBJECT);
3204
3205 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3206 scratch1, scratch2);
3207}
3208
3209
3210// Allocates a heap number or jumps to the gc_required label if the young space
3211// is full and a scavenge is needed.
3212void MacroAssembler::AllocateHeapNumber(Register result,
3213 Label* gc_required,
3214 Register scratch1,
3215 Register scratch2,
3216 CPURegister value,
3217 CPURegister heap_number_map,
3218 MutableMode mode) {
3219 DCHECK(!value.IsValid() || value.Is64Bits());
3220 UseScratchRegisterScope temps(this);
3221
3222 // Allocate an object in the heap for the heap number and tag it as a heap
3223 // object.
3224 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3225 NO_ALLOCATION_FLAGS);
3226
3227 Heap::RootListIndex map_index = mode == MUTABLE
3228 ? Heap::kMutableHeapNumberMapRootIndex
3229 : Heap::kHeapNumberMapRootIndex;
3230
3231 // Prepare the heap number map.
3232 if (!heap_number_map.IsValid()) {
3233 // If we have a valid value register, use the same type of register to store
3234 // the map so we can use STP to store both in one instruction.
3235 if (value.IsValid() && value.IsFPRegister()) {
3236 heap_number_map = temps.AcquireD();
3237 } else {
3238 heap_number_map = scratch1;
3239 }
3240 LoadRoot(heap_number_map, map_index);
3241 }
3242 if (emit_debug_code()) {
3243 Register map;
3244 if (heap_number_map.IsFPRegister()) {
3245 map = scratch1;
3246 Fmov(map, DoubleRegister(heap_number_map));
3247 } else {
3248 map = Register(heap_number_map);
3249 }
3250 AssertRegisterIsRoot(map, map_index);
3251 }
3252
3253 // Store the heap number map and the value in the allocated object.
3254 if (value.IsSameSizeAndType(heap_number_map)) {
3255 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3256 HeapNumber::kValueOffset);
3257 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
3258 } else {
3259 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3260 if (value.IsValid()) {
3261 Str(value, MemOperand(result, HeapNumber::kValueOffset));
3262 }
3263 }
3264 ObjectTag(result, result);
3265}
3266
3267
3268void MacroAssembler::JumpIfObjectType(Register object,
3269 Register map,
3270 Register type_reg,
3271 InstanceType type,
3272 Label* if_cond_pass,
3273 Condition cond) {
3274 CompareObjectType(object, map, type_reg, type);
3275 B(cond, if_cond_pass);
3276}
3277
3278
3279void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3280 Register value, Register scratch1,
3281 Register scratch2, Label* gc_required) {
3282 DCHECK(!result.is(constructor));
3283 DCHECK(!result.is(scratch1));
3284 DCHECK(!result.is(scratch2));
3285 DCHECK(!result.is(value));
3286
3287 // Allocate JSValue in new space.
3288 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
3289
3290 // Initialize the JSValue.
3291 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3292 Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3293 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3294 Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3295 Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3296 Str(value, FieldMemOperand(result, JSValue::kValueOffset));
3297 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3298}
3299
3300
3301void MacroAssembler::JumpIfNotObjectType(Register object,
3302 Register map,
3303 Register type_reg,
3304 InstanceType type,
3305 Label* if_not_object) {
3306 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3307}
3308
3309
3310// Sets condition flags based on comparison, and returns type in type_reg.
3311void MacroAssembler::CompareObjectType(Register object,
3312 Register map,
3313 Register type_reg,
3314 InstanceType type) {
3315 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3316 CompareInstanceType(map, type_reg, type);
3317}
3318
3319
3320// Sets condition flags based on comparison, and returns type in type_reg.
3321void MacroAssembler::CompareInstanceType(Register map,
3322 Register type_reg,
3323 InstanceType type) {
3324 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3325 Cmp(type_reg, type);
3326}
3327
3328
3329void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3330 UseScratchRegisterScope temps(this);
3331 Register obj_map = temps.AcquireX();
3332 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3333 CompareRoot(obj_map, index);
3334}
3335
3336
3337void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3338 Handle<Map> map) {
3339 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3340 CompareMap(scratch, map);
3341}
3342
3343
3344void MacroAssembler::CompareMap(Register obj_map,
3345 Handle<Map> map) {
3346 Cmp(obj_map, Operand(map));
3347}
3348
3349
3350void MacroAssembler::CheckMap(Register obj,
3351 Register scratch,
3352 Handle<Map> map,
3353 Label* fail,
3354 SmiCheckType smi_check_type) {
3355 if (smi_check_type == DO_SMI_CHECK) {
3356 JumpIfSmi(obj, fail);
3357 }
3358
3359 CompareObjectMap(obj, scratch, map);
3360 B(ne, fail);
3361}
3362
3363
3364void MacroAssembler::CheckMap(Register obj,
3365 Register scratch,
3366 Heap::RootListIndex index,
3367 Label* fail,
3368 SmiCheckType smi_check_type) {
3369 if (smi_check_type == DO_SMI_CHECK) {
3370 JumpIfSmi(obj, fail);
3371 }
3372 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3373 JumpIfNotRoot(scratch, index, fail);
3374}
3375
3376
3377void MacroAssembler::CheckMap(Register obj_map,
3378 Handle<Map> map,
3379 Label* fail,
3380 SmiCheckType smi_check_type) {
3381 if (smi_check_type == DO_SMI_CHECK) {
3382 JumpIfSmi(obj_map, fail);
3383 }
3384
3385 CompareMap(obj_map, map);
3386 B(ne, fail);
3387}
3388
3389
3390void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3391 Register scratch2, Handle<WeakCell> cell,
3392 Handle<Code> success,
3393 SmiCheckType smi_check_type) {
3394 Label fail;
3395 if (smi_check_type == DO_SMI_CHECK) {
3396 JumpIfSmi(obj, &fail);
3397 }
3398 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3399 CmpWeakValue(scratch1, cell, scratch2);
3400 B(ne, &fail);
3401 Jump(success, RelocInfo::CODE_TARGET);
3402 Bind(&fail);
3403}
3404
3405
3406void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3407 Register scratch) {
3408 Mov(scratch, Operand(cell));
3409 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3410 Cmp(value, scratch);
3411}
3412
3413
3414void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3415 Mov(value, Operand(cell));
3416 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
3417}
3418
3419
3420void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3421 Label* miss) {
3422 GetWeakValue(value, cell);
3423 JumpIfSmi(value, miss);
3424}
3425
3426
3427void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3428 UseScratchRegisterScope temps(this);
3429 Register temp = temps.AcquireX();
3430 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3431 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3432 Tst(temp, mask);
3433}
3434
3435
3436void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3437 // Load the map's "bit field 2".
3438 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3439 // Retrieve elements_kind from bit field 2.
3440 DecodeField<Map::ElementsKindBits>(result);
3441}
3442
3443
3444void MacroAssembler::GetMapConstructor(Register result, Register map,
3445 Register temp, Register temp2) {
3446 Label done, loop;
3447 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3448 Bind(&loop);
3449 JumpIfSmi(result, &done);
3450 CompareObjectType(result, temp, temp2, MAP_TYPE);
3451 B(ne, &done);
3452 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3453 B(&loop);
3454 Bind(&done);
3455}
3456
3457
3458void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3459 Register scratch, Label* miss) {
3460 DCHECK(!AreAliased(function, result, scratch));
3461
3462 // Get the prototype or initial map from the function.
3463 Ldr(result,
3464 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3465
3466 // If the prototype or initial map is the hole, don't return it and simply
3467 // miss the cache instead. This will allow us to allocate a prototype object
3468 // on-demand in the runtime system.
3469 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3470
3471 // If the function does not have an initial map, we're done.
3472 Label done;
3473 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3474
3475 // Get the prototype from the initial map.
3476 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3477
3478 // All done.
3479 Bind(&done);
3480}
3481
3482
3483void MacroAssembler::PushRoot(Heap::RootListIndex index) {
3484 UseScratchRegisterScope temps(this);
3485 Register temp = temps.AcquireX();
3486 LoadRoot(temp, index);
3487 Push(temp);
3488}
3489
3490
3491void MacroAssembler::CompareRoot(const Register& obj,
3492 Heap::RootListIndex index) {
3493 UseScratchRegisterScope temps(this);
3494 Register temp = temps.AcquireX();
3495 DCHECK(!AreAliased(obj, temp));
3496 LoadRoot(temp, index);
3497 Cmp(obj, temp);
3498}
3499
3500
3501void MacroAssembler::JumpIfRoot(const Register& obj,
3502 Heap::RootListIndex index,
3503 Label* if_equal) {
3504 CompareRoot(obj, index);
3505 B(eq, if_equal);
3506}
3507
3508
3509void MacroAssembler::JumpIfNotRoot(const Register& obj,
3510 Heap::RootListIndex index,
3511 Label* if_not_equal) {
3512 CompareRoot(obj, index);
3513 B(ne, if_not_equal);
3514}
3515
3516
3517void MacroAssembler::CompareAndSplit(const Register& lhs,
3518 const Operand& rhs,
3519 Condition cond,
3520 Label* if_true,
3521 Label* if_false,
3522 Label* fall_through) {
3523 if ((if_true == if_false) && (if_false == fall_through)) {
3524 // Fall through.
3525 } else if (if_true == if_false) {
3526 B(if_true);
3527 } else if (if_false == fall_through) {
3528 CompareAndBranch(lhs, rhs, cond, if_true);
3529 } else if (if_true == fall_through) {
3530 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3531 } else {
3532 CompareAndBranch(lhs, rhs, cond, if_true);
3533 B(if_false);
3534 }
3535}
3536
3537
3538void MacroAssembler::TestAndSplit(const Register& reg,
3539 uint64_t bit_pattern,
3540 Label* if_all_clear,
3541 Label* if_any_set,
3542 Label* fall_through) {
3543 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3544 // Fall through.
3545 } else if (if_all_clear == if_any_set) {
3546 B(if_all_clear);
3547 } else if (if_all_clear == fall_through) {
3548 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3549 } else if (if_any_set == fall_through) {
3550 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3551 } else {
3552 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3553 B(if_all_clear);
3554 }
3555}
3556
3557
3558void MacroAssembler::CheckFastElements(Register map,
3559 Register scratch,
3560 Label* fail) {
3561 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3562 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3563 STATIC_ASSERT(FAST_ELEMENTS == 2);
3564 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3565 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3566 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3567 B(hi, fail);
3568}
3569
3570
3571void MacroAssembler::CheckFastObjectElements(Register map,
3572 Register scratch,
3573 Label* fail) {
3574 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3575 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3576 STATIC_ASSERT(FAST_ELEMENTS == 2);
3577 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3578 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3579 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3580 // If cond==ls, set cond=hi, otherwise compare.
3581 Ccmp(scratch,
3582 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3583 B(hi, fail);
3584}
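
// The Cmp/Ccmp pair above folds a two-sided range check into a single branch:
// the conditional compare only runs when the first comparison produced 'hi',
// and otherwise forces the flags so that 'hi' still holds and the branch to
// 'fail' is taken. A scalar sketch of the same decision, with the two limits
// passed in as parameters; illustration only.
#if 0  // Illustrative sketch; not part of the build.
static bool IsFastObjectElementsValueSketch(unsigned bit_field2,
                                            unsigned max_holey_smi,
                                            unsigned max_holey_object) {
  if (bit_field2 <= max_holey_smi) return false;    // smi-only kinds: fail
  if (bit_field2 > max_holey_object) return false;  // not a fast kind: fail
  return true;  // fast (holey) object elements
}
#endif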
3585
3586
3587// Note: The ARM version of this clobbers elements_reg, but this version does
3588// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3589void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3590 Register key_reg,
3591 Register elements_reg,
3592 Register scratch1,
3593 FPRegister fpscratch1,
3594 Label* fail,
3595 int elements_offset) {
3596 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3597 Label store_num;
3598
3599 // Speculatively convert the smi to a double - all smis can be exactly
3600 // represented as a double.
3601 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3602
3603 // If value_reg is a smi, we're done.
3604 JumpIfSmi(value_reg, &store_num);
3605
3606 // Ensure that the object is a heap number.
3607 JumpIfNotHeapNumber(value_reg, fail);
3608
3609 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3610
3611 // Canonicalize NaNs.
3612 CanonicalizeNaN(fpscratch1);
3613
3614 // Store the result.
3615 Bind(&store_num);
3616 Add(scratch1, elements_reg,
3617 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3618 Str(fpscratch1,
3619 FieldMemOperand(scratch1,
3620 FixedDoubleArray::kHeaderSize - elements_offset));
3621}
3622
3623
3624bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3625 return has_frame_ || !stub->SometimesSetsUpAFrame();
3626}
3627
3628
3629void MacroAssembler::IndexFromHash(Register hash, Register index) {
3630 // If the hash field contains an array index pick it out. The assert checks
3631 // that the constants for the maximum number of digits for an array index
3632 // cached in the hash field and the number of bits reserved for it does not
3633 // conflict.
3634 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
3635 (1 << String::kArrayIndexValueBits));
3636 DecodeField<String::ArrayIndexValueBits>(index, hash);
3637 SmiTag(index, index);
3638}
3639
3640
3641void MacroAssembler::EmitSeqStringSetCharCheck(
3642 Register string,
3643 Register index,
3644 SeqStringSetCharCheckIndexType index_type,
3645 Register scratch,
3646 uint32_t encoding_mask) {
3647 DCHECK(!AreAliased(string, index, scratch));
3648
3649 if (index_type == kIndexIsSmi) {
3650 AssertSmi(index);
3651 }
3652
3653 // Check that string is an object.
3654 AssertNotSmi(string, kNonObject);
3655
3656 // Check that string has an appropriate map.
3657 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3658 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3659
3660 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3661 Cmp(scratch, encoding_mask);
3662 Check(eq, kUnexpectedStringType);
3663
3664 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3665 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3666 Check(lt, kIndexIsTooLarge);
3667
3668 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
3669 Cmp(index, 0);
3670 Check(ge, kIndexIsNegative);
3671}
3672
3673
3674void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3675 Register scratch1,
3676 Register scratch2,
3677 Label* miss) {
3678 DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
3679 Label same_contexts;
3680
3681 // Load current lexical context from the stack frame.
3682 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
3683 // In debug mode, make sure the lexical context is set.
3684#ifdef DEBUG
3685 Cmp(scratch1, 0);
3686 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3687#endif
3688
3689 // Load the native context of the current context.
3690 Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
3691
3692 // Check the context is a native context.
3693 if (emit_debug_code()) {
3694 // Read the first word and compare to the native_context_map.
3695 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
3696 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
3697 Check(eq, kExpectedNativeContext);
3698 }
3699
3700 // Check if both contexts are the same.
3701 Ldr(scratch2, FieldMemOperand(holder_reg,
3702 JSGlobalProxy::kNativeContextOffset));
3703 Cmp(scratch1, scratch2);
3704 B(&same_contexts, eq);
3705
3706 // Check the context is a native context.
3707 if (emit_debug_code()) {
3708 // We're short on scratch registers here, so use holder_reg as a scratch.
3709 Push(holder_reg);
3710 Register scratch3 = holder_reg;
3711
3712 CompareRoot(scratch2, Heap::kNullValueRootIndex);
3713 Check(ne, kExpectedNonNullContext);
3714
3715 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
3716 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
3717 Check(eq, kExpectedNativeContext);
3718 Pop(holder_reg);
3719 }
3720
3721 // Check that the security token in the calling global object is
3722 // compatible with the security token in the receiving global
3723 // object.
3724 int token_offset = Context::kHeaderSize +
3725 Context::SECURITY_TOKEN_INDEX * kPointerSize;
3726
3727 Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
3728 Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
3729 Cmp(scratch1, scratch2);
3730 B(miss, ne);
3731
3732 Bind(&same_contexts);
3733}
3734
3735
3736// Compute the hash code from the untagged key. This must be kept in sync with
3737// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3738// code-stub-hydrogen.cc
3739void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3740 DCHECK(!AreAliased(key, scratch));
3741
3742 // Xor original key with a seed.
3743 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3744 Eor(key, key, Operand::UntagSmi(scratch));
3745
3746 // The algorithm uses 32-bit integer values.
3747 key = key.W();
3748 scratch = scratch.W();
3749
3750 // Compute the hash code from the untagged key. This must be kept in sync
3751 // with ComputeIntegerHash in utils.h.
3752 //
3753 // hash = ~hash + (hash << 15);
3754 Mvn(scratch, key);
3755 Add(key, scratch, Operand(key, LSL, 15));
3756 // hash = hash ^ (hash >> 12);
3757 Eor(key, key, Operand(key, LSR, 12));
3758 // hash = hash + (hash << 2);
3759 Add(key, key, Operand(key, LSL, 2));
3760 // hash = hash ^ (hash >> 4);
3761 Eor(key, key, Operand(key, LSR, 4));
3762 // hash = hash * 2057;
3763 Mov(scratch, Operand(key, LSL, 11));
3764 Add(key, key, Operand(key, LSL, 3));
3765 Add(key, key, scratch);
3766 // hash = hash ^ (hash >> 16);
3767 Eor(key, key, Operand(key, LSR, 16));
3768 Bic(key, key, Operand(0xc0000000u));
3769}
3770
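// A standalone sketch of the hash computed above, mirroring the step-by-step
// comments; the seed is passed in explicitly instead of being loaded from
// Heap::kHashSeedRootIndex. Illustration only.
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // same as hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // the final Bic clears the top two bits
}
#endif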
3771
3772void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3773 Register elements,
3774 Register key,
3775 Register result,
3776 Register scratch0,
3777 Register scratch1,
3778 Register scratch2,
3779 Register scratch3) {
3780 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
3781
3782 Label done;
3783
3784 SmiUntag(scratch0, key);
3785 GetNumberHash(scratch0, scratch1);
3786
3787 // Compute the capacity mask.
3788 Ldrsw(scratch1,
3789 UntagSmiFieldMemOperand(elements,
3790 SeededNumberDictionary::kCapacityOffset));
3791 Sub(scratch1, scratch1, 1);
3792
3793 // Generate an unrolled loop that performs a few probes before giving up.
3794 for (int i = 0; i < kNumberDictionaryProbes; i++) {
3795 // Compute the masked index: (hash + i + i * i) & mask.
3796 if (i > 0) {
3797 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
3798 } else {
3799 Mov(scratch2, scratch0);
3800 }
3801 And(scratch2, scratch2, scratch1);
3802
3803 // Scale the index by multiplying by the element size.
3804 DCHECK(SeededNumberDictionary::kEntrySize == 3);
3805 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3806
3807 // Check if the key is identical to the name.
3808 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3809 Ldr(scratch3,
3810 FieldMemOperand(scratch2,
3811 SeededNumberDictionary::kElementsStartOffset));
3812 Cmp(key, scratch3);
3813 if (i != (kNumberDictionaryProbes - 1)) {
3814 B(eq, &done);
3815 } else {
3816 B(ne, miss);
3817 }
3818 }
3819
3820 Bind(&done);
3821 // Check that the value is a field property.
3822 const int kDetailsOffset =
3823 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3824 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
3825 DCHECK_EQ(DATA, 0);
3826 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
3827
3828 // Get the value at the masked, scaled index and return.
3829 const int kValueOffset =
3830 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3831 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
3832}
3833
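// Scalar sketch of the unrolled probe sequence above: quadratic probing over
// a power-of-two capacity with three pointer-sized slots per entry (key,
// value, details). The probe count and entry layout are taken from the code
// above; the helper and its types are otherwise assumptions for illustration.
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static int ProbeNumberDictionarySketch(const uint64_t* entries,
                                       uint32_t capacity,  // power of two
                                       uint32_t hash, uint64_t key) {
  const int kEntrySize = 3;
  const int kNumberDictionaryProbes = 4;  // assumed to match the constant used above
  const uint32_t mask = capacity - 1;
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Masked index, as the comment above puts it: (hash + i + i * i) & mask.
    uint32_t index = (hash + i + i * i) & mask;
    if (entries[index * kEntrySize] == key) return static_cast<int>(index);
  }
  return -1;  // the real code branches to 'miss' here
}
#endif
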
3834void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
3835 Register code_entry,
3836 Register scratch) {
3837 const int offset = JSFunction::kCodeEntryOffset;
3838
3839 // Since a code entry (value) is always in old space, we don't need to update
3840 // remembered set. If incremental marking is off, there is nothing for us to
3841 // do.
3842 if (!FLAG_incremental_marking) return;
3843
3844 DCHECK(js_function.is(x1));
3845 DCHECK(code_entry.is(x7));
3846 DCHECK(scratch.is(x5));
3847 AssertNotSmi(js_function);
3848
3849 if (emit_debug_code()) {
3850 UseScratchRegisterScope temps(this);
3851 Register temp = temps.AcquireX();
3852 Add(scratch, js_function, offset - kHeapObjectTag);
3853 Ldr(temp, MemOperand(scratch));
3854 Cmp(temp, code_entry);
3855 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3856 }
3857
3858 // First, check if a write barrier is even needed. The tests below
3859 // catch stores of Smis and stores into young gen.
3860 Label done;
3861
3862 CheckPageFlagClear(code_entry, scratch,
3863 MemoryChunk::kPointersToHereAreInterestingMask, &done);
3864 CheckPageFlagClear(js_function, scratch,
3865 MemoryChunk::kPointersFromHereAreInterestingMask, &done);
3866
3867 const Register dst = scratch;
3868 Add(dst, js_function, offset - kHeapObjectTag);
3869
3870 // Save caller-saved registers. Both input registers (x1 and x7) are caller
3871 // saved, so there is no need to push them.
3872 PushCPURegList(kCallerSaved);
3873
3874 int argument_count = 3;
3875
3876 Mov(x0, js_function);
3877 Mov(x1, dst);
3878 Mov(x2, ExternalReference::isolate_address(isolate()));
3879
3880 {
3881 AllowExternalCallThatCantCauseGC scope(this);
3882 CallCFunction(
3883 ExternalReference::incremental_marking_record_write_code_entry_function(
3884 isolate()),
3885 argument_count);
3886 }
3887
3888 // Restore caller-saved registers.
3889 PopCPURegList(kCallerSaved);
3890
3891 Bind(&done);
3892}
3893
3894void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
3895 Register address,
3896 Register scratch1,
3897 SaveFPRegsMode fp_mode,
3898 RememberedSetFinalAction and_then) {
3899 DCHECK(!AreAliased(object, address, scratch1));
3900 Label done, store_buffer_overflow;
3901 if (emit_debug_code()) {
3902 Label ok;
3903 JumpIfNotInNewSpace(object, &ok);
3904 Abort(kRememberedSetPointerInNewSpace);
3905 bind(&ok);
3906 }
3907 UseScratchRegisterScope temps(this);
3908 Register scratch2 = temps.AcquireX();
3909
3910 // Load store buffer top.
3911 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
3912 Ldr(scratch1, MemOperand(scratch2));
3913 // Store pointer to buffer and increment buffer top.
3914 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
3915 // Write back new top of buffer.
3916 Str(scratch1, MemOperand(scratch2));
3917 // Call stub on end of buffer.
3918 // Check for end of buffer.
3919 DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
3920 (1 << (14 + kPointerSizeLog2)));
3921 if (and_then == kFallThroughAtEnd) {
3922 Tbz(scratch1, (14 + kPointerSizeLog2), &done);
3923 } else {
3924 DCHECK(and_then == kReturnAtEnd);
3925 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
3926 Ret();
3927 }
3928
3929 Bind(&store_buffer_overflow);
3930 Push(lr);
3931 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
3932 CallStub(&store_buffer_overflow_stub);
3933 Pop(lr);
3934
3935 Bind(&done);
3936 if (and_then == kReturnAtEnd) {
3937 Ret();
3938 }
3939}
3940
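// Sketch of the store-buffer fast path above: append the slot address at the
// current top, bump the top pointer, and detect overflow by testing a single
// bit of the new top (the DCHECK above ties that bit to
// StoreBuffer::kStoreBufferOverflowBit). Illustration only; the buffer layout
// that makes the bit test work is assumed here.
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static bool AppendToStoreBufferSketch(uintptr_t** store_buffer_top,
                                      uintptr_t* slot_address) {
  const int kPointerSizeLog2 = 3;  // 64-bit pointers
  const uintptr_t kOverflowBit = uintptr_t{1} << (14 + kPointerSizeLog2);
  uintptr_t* top = *store_buffer_top;
  *top++ = reinterpret_cast<uintptr_t>(slot_address);  // record the slot
  *store_buffer_top = top;                              // write back new top
  // True when the overflow stub has to be called (the Tbnz path above).
  return (reinterpret_cast<uintptr_t>(top) & kOverflowBit) != 0;
}
#endif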
3941
3942void MacroAssembler::PopSafepointRegisters() {
3943 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3944 PopXRegList(kSafepointSavedRegisters);
3945 Drop(num_unsaved);
3946}
3947
3948
3949void MacroAssembler::PushSafepointRegisters() {
3950 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
3951 // adjust the stack for unsaved registers.
3952 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3953 DCHECK(num_unsaved >= 0);
3954 Claim(num_unsaved);
3955 PushXRegList(kSafepointSavedRegisters);
3956}
3957
3958
3959void MacroAssembler::PushSafepointRegistersAndDoubles() {
3960 PushSafepointRegisters();
3961 PushCPURegList(CPURegList(
3962 CPURegister::kFPRegister, kDRegSizeInBits,
3963 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
3964 ->allocatable_double_codes_mask()));
3965}
3966
3967
3968void MacroAssembler::PopSafepointRegistersAndDoubles() {
3969 PopCPURegList(CPURegList(
3970 CPURegister::kFPRegister, kDRegSizeInBits,
3971 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
3972 ->allocatable_double_codes_mask()));
3973 PopSafepointRegisters();
3974}
3975
3976
3977int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
3978 // Make sure the safepoint registers list is what we expect.
3979 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
3980
3981 // Safepoint registers are stored contiguously on the stack, but not all the
3982 // registers are saved. The following registers are excluded:
3983 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
3984 // the macro assembler.
3985 // - x28 (jssp) because JS stack pointer doesn't need to be included in
3986 // safepoint registers.
3987 // - x31 (csp) because the system stack pointer doesn't need to be included
3988 // in safepoint registers.
3989 //
3990 // This function implements the mapping of register code to index into the
3991 // safepoint register slots.
3992 if ((reg_code >= 0) && (reg_code <= 15)) {
3993 return reg_code;
3994 } else if ((reg_code >= 18) && (reg_code <= 27)) {
3995 // Skip ip0 and ip1.
3996 return reg_code - 2;
3997 } else if ((reg_code == 29) || (reg_code == 30)) {
3998 // Also skip jssp.
3999 return reg_code - 3;
4000 } else {
4001 // This register has no safepoint register slot.
4002 UNREACHABLE();
4003 return -1;
4004 }
4005}
4006
4007void MacroAssembler::CheckPageFlag(const Register& object,
4008 const Register& scratch, int mask,
4009 Condition cc, Label* condition_met) {
4010 And(scratch, object, ~Page::kPageAlignmentMask);
4011 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4012 if (cc == eq) {
4013 TestAndBranchIfAnySet(scratch, mask, condition_met);
4014 } else {
4015 TestAndBranchIfAllClear(scratch, mask, condition_met);
4016 }
4017}
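
// The three page-flag helpers above share one idea: pages are power-of-two
// aligned, so masking an object's address with ~kPageAlignmentMask yields its
// MemoryChunk header, whose flags word can then be tested. A scalar sketch,
// with the page size and flags offset as assumed placeholder values:
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static bool PageFlagsSetSketch(uintptr_t object_address, uintptr_t mask) {
  const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed page size
  const uintptr_t kFlagsOffset = 2 * sizeof(uintptr_t);           // assumed offset
  uintptr_t chunk = object_address & ~kPageAlignmentMask;  // MemoryChunk base
  uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + kFlagsOffset);
  return (flags & mask) != 0;  // CheckPageFlagSet branches when this holds
}
#endif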
4018
4019void MacroAssembler::CheckPageFlagSet(const Register& object,
4020 const Register& scratch,
4021 int mask,
4022 Label* if_any_set) {
4023 And(scratch, object, ~Page::kPageAlignmentMask);
4024 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4025 TestAndBranchIfAnySet(scratch, mask, if_any_set);
4026}
4027
4028
4029void MacroAssembler::CheckPageFlagClear(const Register& object,
4030 const Register& scratch,
4031 int mask,
4032 Label* if_all_clear) {
4033 And(scratch, object, ~Page::kPageAlignmentMask);
4034 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4035 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
4036}
4037
4038
4039void MacroAssembler::RecordWriteField(
4040 Register object,
4041 int offset,
4042 Register value,
4043 Register scratch,
4044 LinkRegisterStatus lr_status,
4045 SaveFPRegsMode save_fp,
4046 RememberedSetAction remembered_set_action,
4047 SmiCheck smi_check,
4048 PointersToHereCheck pointers_to_here_check_for_value) {
4049 // First, check if a write barrier is even needed. The tests below
4050 // catch stores of Smis.
4051 Label done;
4052
4053 // Skip the barrier if writing a smi.
4054 if (smi_check == INLINE_SMI_CHECK) {
4055 JumpIfSmi(value, &done);
4056 }
4057
4058 // Although the object register is tagged, the offset is relative to the start
4059 // of the object, so offset must be a multiple of kPointerSize.
4060 DCHECK(IsAligned(offset, kPointerSize));
4061
4062 Add(scratch, object, offset - kHeapObjectTag);
4063 if (emit_debug_code()) {
4064 Label ok;
4065 Tst(scratch, (1 << kPointerSizeLog2) - 1);
4066 B(eq, &ok);
4067 Abort(kUnalignedCellInWriteBarrier);
4068 Bind(&ok);
4069 }
4070
4071 RecordWrite(object,
4072 scratch,
4073 value,
4074 lr_status,
4075 save_fp,
4076 remembered_set_action,
4077 OMIT_SMI_CHECK,
4078 pointers_to_here_check_for_value);
4079
4080 Bind(&done);
4081
4082 // Clobber clobbered input registers when running with the debug-code flag
4083 // turned on to provoke errors.
4084 if (emit_debug_code()) {
4085 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
4086 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
4087 }
4088}
4089
4090
4091// Will clobber: object, map, dst.
4092// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4093void MacroAssembler::RecordWriteForMap(Register object,
4094 Register map,
4095 Register dst,
4096 LinkRegisterStatus lr_status,
4097 SaveFPRegsMode fp_mode) {
4098 ASM_LOCATION("MacroAssembler::RecordWriteForMap");
4099 DCHECK(!AreAliased(object, map));
4100
4101 if (emit_debug_code()) {
4102 UseScratchRegisterScope temps(this);
4103 Register temp = temps.AcquireX();
4104
4105 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
4106 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4107 }
4108
4109 if (!FLAG_incremental_marking) {
4110 return;
4111 }
4112
4113 if (emit_debug_code()) {
4114 UseScratchRegisterScope temps(this);
4115 Register temp = temps.AcquireX();
4116
4117 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4118 Cmp(temp, map);
4119 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4120 }
4121
4122 // First, check if a write barrier is even needed. The tests below
4123 // catch stores of smis and stores into the young generation.
4124 Label done;
4125
4126 // A single check of the map's pages interesting flag suffices, since it is
4127 // only set during incremental collection, and then it's also guaranteed that
4128 // the from object's page's interesting flag is also set. This optimization
4129 // relies on the fact that maps can never be in new space.
4130 CheckPageFlagClear(map,
4131 map, // Used as scratch.
4132 MemoryChunk::kPointersToHereAreInterestingMask,
4133 &done);
4134
4135 // Record the actual write.
4136 if (lr_status == kLRHasNotBeenSaved) {
4137 Push(lr);
4138 }
4139 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
4140 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
4141 fp_mode);
4142 CallStub(&stub);
4143 if (lr_status == kLRHasNotBeenSaved) {
4144 Pop(lr);
4145 }
4146
4147 Bind(&done);
4148
4149 // Count number of write barriers in generated code.
4150 isolate()->counters()->write_barriers_static()->Increment();
4151 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
4152 dst);
4153
4154 // Clobber clobbered registers when running with the debug-code flag
4155 // turned on to provoke errors.
4156 if (emit_debug_code()) {
4157 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
4158 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
4159 }
4160}
4161
4162
4163// Will clobber: object, address, value.
4164// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4165//
4166// The register 'object' contains a heap object pointer. The heap object tag is
4167// shifted away.
4168void MacroAssembler::RecordWrite(
4169 Register object,
4170 Register address,
4171 Register value,
4172 LinkRegisterStatus lr_status,
4173 SaveFPRegsMode fp_mode,
4174 RememberedSetAction remembered_set_action,
4175 SmiCheck smi_check,
4176 PointersToHereCheck pointers_to_here_check_for_value) {
4177 ASM_LOCATION("MacroAssembler::RecordWrite");
4178 DCHECK(!AreAliased(object, value));
4179
4180 if (emit_debug_code()) {
4181 UseScratchRegisterScope temps(this);
4182 Register temp = temps.AcquireX();
4183
4184 Ldr(temp, MemOperand(address));
4185 Cmp(temp, value);
4186 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4187 }
4188
4189 // First, check if a write barrier is even needed. The tests below
4190 // catch stores of smis and stores into the young generation.
4191 Label done;
4192
4193 if (smi_check == INLINE_SMI_CHECK) {
4194 DCHECK_EQ(0, kSmiTag);
4195 JumpIfSmi(value, &done);
4196 }
4197
4198 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
4199 CheckPageFlagClear(value,
4200 value, // Used as scratch.
4201 MemoryChunk::kPointersToHereAreInterestingMask,
4202 &done);
4203 }
4204 CheckPageFlagClear(object,
4205 value, // Used as scratch.
4206 MemoryChunk::kPointersFromHereAreInterestingMask,
4207 &done);
4208
4209 // Record the actual write.
4210 if (lr_status == kLRHasNotBeenSaved) {
4211 Push(lr);
4212 }
4213 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
4214 fp_mode);
4215 CallStub(&stub);
4216 if (lr_status == kLRHasNotBeenSaved) {
4217 Pop(lr);
4218 }
4219
4220 Bind(&done);
4221
4222 // Count number of write barriers in generated code.
4223 isolate()->counters()->write_barriers_static()->Increment();
4224 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
4225 value);
4226
4227 // Clobber clobbered registers when running with the debug-code flag
4228 // turned on to provoke errors.
4229 if (emit_debug_code()) {
4230 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
4231 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
4232 }
4233}
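
// Decision logic of the write-barrier fast path above, as a scalar sketch:
// the barrier is skipped for smi values and whenever the page flags show that
// neither the stored value's page nor the holding object's page is currently
// interesting. 'PageFlagsSetSketch' refers to the illustrative helper sketched
// after CheckPageFlag above; everything here is illustration only.
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static bool PageFlagsSetSketch(uintptr_t address, uintptr_t mask);  // sketched earlier

static bool NeedsWriteBarrierSketch(uintptr_t object, uintptr_t value,
                                    bool value_is_smi,
                                    uintptr_t pointers_to_here_mask,
                                    uintptr_t pointers_from_here_mask) {
  if (value_is_smi) return false;  // INLINE_SMI_CHECK path
  if (!PageFlagsSetSketch(value, pointers_to_here_mask)) return false;
  if (!PageFlagsSetSketch(object, pointers_from_here_mask)) return false;
  return true;  // fall through to RecordWriteStub
}
#endif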
4234
4235
4236void MacroAssembler::AssertHasValidColor(const Register& reg) {
4237 if (emit_debug_code()) {
4238 // The bit sequence is backward. The first character in the string
4239 // represents the least significant bit.
4240 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4241
4242 Label color_is_valid;
4243 Tbnz(reg, 0, &color_is_valid);
4244 Tbz(reg, 1, &color_is_valid);
4245 Abort(kUnexpectedColorFound);
4246 Bind(&color_is_valid);
4247 }
4248}
4249
4250
4251void MacroAssembler::GetMarkBits(Register addr_reg,
4252 Register bitmap_reg,
4253 Register shift_reg) {
4254 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4255 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4256 // addr_reg is divided into fields:
4257 // |63 page base 20|19 high 8|7 shift 3|2 0|
4258 // 'high' gives the index of the cell holding color bits for the object.
4259 // 'shift' gives the offset in the cell for this object's color.
4260 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4261 UseScratchRegisterScope temps(this);
4262 Register temp = temps.AcquireX();
4263 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4264 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4265 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4266 // bitmap_reg:
4267 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4268 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4269}
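
// Sketch of the bit-field decomposition documented above: the page base, the
// index of the 32-bit bitmap cell and the bit offset within that cell are all
// carved out of the object address. The page size is an assumption of the
// sketch; the loads above add MemoryChunk::kHeaderSize on top of the cell
// address computed here. Illustration only.
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static void GetMarkBitsSketch(uintptr_t addr, uintptr_t* bitmap_cell_address,
                              unsigned* bit_shift) {
  const int kPointerSizeLog2 = 3;   // 8-byte words
  const int kBitsPerCellLog2 = 5;   // 32 mark bits per cell
  const int kBytesPerCellLog2 = 2;  // 4-byte cells
  const int kPageSizeBits = 19;     // assumed page size
  const uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;

  const int kShiftBits = kPointerSizeLog2 + kBitsPerCellLog2;
  uintptr_t page_base = addr & ~kPageAlignmentMask;
  // Index of the cell holding this object's color bits ('high' field above).
  uintptr_t cell_index = (addr & kPageAlignmentMask) >> kShiftBits;
  *bitmap_cell_address = page_base + (cell_index << kBytesPerCellLog2);
  // Offset of this object's color bits within the cell ('shift' field above).
  *bit_shift = static_cast<unsigned>((addr >> kPointerSizeLog2) &
                                     ((1u << kBitsPerCellLog2) - 1));
}
#endif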
4270
4271
4272void MacroAssembler::HasColor(Register object,
4273 Register bitmap_scratch,
4274 Register shift_scratch,
4275 Label* has_color,
4276 int first_bit,
4277 int second_bit) {
4278 // See mark-compact.h for color definitions.
4279 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
4280
4281 GetMarkBits(object, bitmap_scratch, shift_scratch);
4282 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4283 // Shift the bitmap down to get the color of the object in bits [1:0].
4284 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4285
4286 AssertHasValidColor(bitmap_scratch);
4287
4288 // These bit sequences are backwards. The first character in the string
4289 // represents the least significant bit.
4290 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4291 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4292 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
4293
4294 // Check for the color.
4295 if (first_bit == 0) {
4296 // Checking for white.
4297 DCHECK(second_bit == 0);
4298 // We only need to test the first bit.
4299 Tbz(bitmap_scratch, 0, has_color);
4300 } else {
4301 Label other_color;
4302 // Checking for grey or black.
4303 Tbz(bitmap_scratch, 0, &other_color);
4304 if (second_bit == 0) {
4305 Tbz(bitmap_scratch, 1, has_color);
4306 } else {
4307 Tbnz(bitmap_scratch, 1, has_color);
4308 }
4309 Bind(&other_color);
4310 }
4311
4312 // Fall through if it does not have the right color.
4313}
4314
4315
4316void MacroAssembler::JumpIfBlack(Register object,
4317 Register scratch0,
4318 Register scratch1,
4319 Label* on_black) {
4320 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4321 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
4322}
4323
4324
4325void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4326 Register object,
4327 Register scratch0,
4328 Register scratch1,
4329 Label* found) {
4330 DCHECK(!AreAliased(object, scratch0, scratch1));
4331 Register current = scratch0;
4332 Label loop_again, end;
4333
4334 // Scratch contains elements pointer.
4335 Mov(current, object);
4336 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4337 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4338 CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
4339
4340 // Loop based on the map going up the prototype chain.
4341 Bind(&loop_again);
4342 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4343 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4344 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4345 CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
4346 B(lo, found);
4347 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4348 DecodeField<Map::ElementsKindBits>(scratch1);
4349 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4350 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4351 CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
4352
4353 Bind(&end);
4354}
4355
4356
4357void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
4358 Register shift_scratch, Register load_scratch,
4359 Register length_scratch,
4360 Label* value_is_white) {
4361 DCHECK(!AreAliased(
4362 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4363
4364 // These bit sequences are backwards. The first character in the string
4365 // represents the least significant bit.
4366 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4367 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4368 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
4369
4370 GetMarkBits(value, bitmap_scratch, shift_scratch);
4371 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4372 Lsr(load_scratch, load_scratch, shift_scratch);
4373
4374 AssertHasValidColor(load_scratch);
4375
4376 // If the value is black or grey we don't need to do anything.
4377 // Since both black and grey have a 1 in the first position and white does
4378 // not have a 1 there we only need to check one bit.
4379 Tbz(load_scratch, 0, value_is_white);
4380}
4381
4382
4383void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4384 if (emit_debug_code()) {
4385 Check(cond, reason);
4386 }
4387}
4388
4389
4390
4391void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4392 if (emit_debug_code()) {
4393 CheckRegisterIsClear(reg, reason);
4394 }
4395}
4396
4397
4398void MacroAssembler::AssertRegisterIsRoot(Register reg,
4399 Heap::RootListIndex index,
4400 BailoutReason reason) {
4401 if (emit_debug_code()) {
4402 CompareRoot(reg, index);
4403 Check(eq, reason);
4404 }
4405}
4406
4407
4408void MacroAssembler::AssertFastElements(Register elements) {
4409 if (emit_debug_code()) {
4410 UseScratchRegisterScope temps(this);
4411 Register temp = temps.AcquireX();
4412 Label ok;
4413 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4414 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4415 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4416 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4417 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4418 Bind(&ok);
4419 }
4420}
4421
4422
4423void MacroAssembler::AssertIsString(const Register& object) {
4424 if (emit_debug_code()) {
4425 UseScratchRegisterScope temps(this);
4426 Register temp = temps.AcquireX();
4427 STATIC_ASSERT(kSmiTag == 0);
4428 Tst(object, kSmiTagMask);
4429 Check(ne, kOperandIsNotAString);
4430 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4431 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
4432 Check(lo, kOperandIsNotAString);
4433 }
4434}
4435
4436
4437void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4438 Label ok;
4439 B(cond, &ok);
4440 Abort(reason);
4441 // Will not return here.
4442 Bind(&ok);
4443}
4444
4445
4446void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4447 Label ok;
4448 Cbz(reg, &ok);
4449 Abort(reason);
4450 // Will not return here.
4451 Bind(&ok);
4452}
4453
4454
4455void MacroAssembler::Abort(BailoutReason reason) {
4456#ifdef DEBUG
4457 RecordComment("Abort message: ");
4458 RecordComment(GetBailoutReason(reason));
4459
4460 if (FLAG_trap_on_abort) {
4461 Brk(0);
4462 return;
4463 }
4464#endif
4465
4466 // Abort is used in some contexts where csp is the stack pointer. In order to
4467 // simplify the CallRuntime code, make sure that jssp is the stack pointer.
4468 // There is no risk of register corruption here because Abort doesn't return.
4469 Register old_stack_pointer = StackPointer();
4470 SetStackPointer(jssp);
4471 Mov(jssp, old_stack_pointer);
4472
4473 // We need some scratch registers for the MacroAssembler, so make sure we have
4474 // some. This is safe here because Abort never returns.
4475 RegList old_tmp_list = TmpList()->list();
4476 TmpList()->Combine(MacroAssembler::DefaultTmpList());
4477
4478 if (use_real_aborts()) {
4479 // Avoid infinite recursion; Push contains some assertions that use Abort.
4480 NoUseRealAbortsScope no_real_aborts(this);
4481
4482 Mov(x0, Smi::FromInt(reason));
4483 Push(x0);
4484
4485 if (!has_frame_) {
4486 // We don't actually want to generate a pile of code for this, so just
4487 // claim there is a stack frame, without generating one.
4488 FrameScope scope(this, StackFrame::NONE);
4489 CallRuntime(Runtime::kAbort);
4490 } else {
4491 CallRuntime(Runtime::kAbort);
4492 }
4493 } else {
4494 // Load the string to pass to Printf.
4495 Label msg_address;
4496 Adr(x0, &msg_address);
4497
4498 // Call Printf directly to report the error.
4499 CallPrintf();
4500
4501 // We need a way to stop execution on both the simulator and real hardware,
4502 // and Unreachable() is the best option.
4503 Unreachable();
4504
4505 // Emit the message string directly in the instruction stream.
4506 {
4507 BlockPoolsScope scope(this);
4508 Bind(&msg_address);
4509 EmitStringData(GetBailoutReason(reason));
4510 }
4511 }
4512
4513 SetStackPointer(old_stack_pointer);
4514 TmpList()->set_list(old_tmp_list);
4515}
4516
4517
4518void MacroAssembler::LoadTransitionedArrayMapConditional(
4519 ElementsKind expected_kind,
4520 ElementsKind transitioned_kind,
4521 Register map_in_out,
4522 Register scratch1,
4523 Register scratch2,
4524 Label* no_map_match) {
4525 DCHECK(IsFastElementsKind(expected_kind));
4526 DCHECK(IsFastElementsKind(transitioned_kind));
4527
4528 // Check that the function's map is the same as the expected cached map.
4529 Ldr(scratch1, NativeContextMemOperand());
4530 Ldr(scratch2,
4531 ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
4532 Cmp(map_in_out, scratch2);
4533 B(ne, no_map_match);
4534
4535 // Use the transitioned cached map.
4536 Ldr(map_in_out,
4537 ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
4538}
4539
4540
4541void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
4542 Ldr(dst, NativeContextMemOperand());
4543 Ldr(dst, ContextMemOperand(dst, index));
4544}
4545
4546
4547void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4548 Register map,
4549 Register scratch) {
4550 // Load the initial map. The global functions all have initial maps.
4551 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4552 if (emit_debug_code()) {
4553 Label ok, fail;
4554 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4555 B(&ok);
4556 Bind(&fail);
4557 Abort(kGlobalFunctionsMustHaveInitialMap);
4558 Bind(&ok);
4559 }
4560}
4561
4562
4563// This is the main Printf implementation. All other Printf variants call
4564// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4565void MacroAssembler::PrintfNoPreserve(const char * format,
4566 const CPURegister& arg0,
4567 const CPURegister& arg1,
4568 const CPURegister& arg2,
4569 const CPURegister& arg3) {
4570 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4571 // in most cases anyway, so this restriction shouldn't be too serious.
4572 DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4573
4574 // The provided arguments, and their proper procedure-call standard registers.
4575 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
4576 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
4577
4578 int arg_count = kPrintfMaxArgCount;
4579
4580 // The PCS varargs registers for printf. Note that x0 is used for the printf
4581 // format string.
4582 static const CPURegList kPCSVarargs =
4583 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
4584 static const CPURegList kPCSVarargsFP =
4585 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
4586
4587 // We can use caller-saved registers as scratch values, except for the
4588 // arguments and the PCS registers where they might need to go.
4589 CPURegList tmp_list = kCallerSaved;
4590 tmp_list.Remove(x0); // Used to pass the format string.
4591 tmp_list.Remove(kPCSVarargs);
4592 tmp_list.Remove(arg0, arg1, arg2, arg3);
4593
4594 CPURegList fp_tmp_list = kCallerSavedFP;
4595 fp_tmp_list.Remove(kPCSVarargsFP);
4596 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4597
4598 // Override the MacroAssembler's scratch register list. The lists will be
4599 // reset automatically at the end of the UseScratchRegisterScope.
4600 UseScratchRegisterScope temps(this);
4601 TmpList()->set_list(tmp_list.list());
4602 FPTmpList()->set_list(fp_tmp_list.list());
4603
4604 // Copies of the printf vararg registers that we can pop from.
4605 CPURegList pcs_varargs = kPCSVarargs;
4606 CPURegList pcs_varargs_fp = kPCSVarargsFP;
4607
4608 // Place the arguments. There are lots of clever tricks and optimizations we
4609 // could use here, but Printf is a debug tool so instead we just try to keep
4610 // it simple: Move each input that isn't already in the right place to a
4611 // scratch register, then move everything back.
4612 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
4613 // Work out the proper PCS register for this argument.
4614 if (args[i].IsRegister()) {
4615 pcs[i] = pcs_varargs.PopLowestIndex().X();
4616 // We might only need a W register here. We need to know the size of the
4617 // argument so we can properly encode it for the simulator call.
4618 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
4619 } else if (args[i].IsFPRegister()) {
4620 // In C, floats are always cast to doubles for varargs calls.
4621 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
4622 } else {
4623 DCHECK(args[i].IsNone());
4624 arg_count = i;
4625 break;
4626 }
4627
4628 // If the argument is already in the right place, leave it where it is.
4629 if (args[i].Aliases(pcs[i])) continue;
4630
4631 // Otherwise, if the argument is in a PCS argument register, allocate an
4632 // appropriate scratch register and then move it out of the way.
4633 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
4634 kPCSVarargsFP.IncludesAliasOf(args[i])) {
4635 if (args[i].IsRegister()) {
4636 Register old_arg = Register(args[i]);
4637 Register new_arg = temps.AcquireSameSizeAs(old_arg);
4638 Mov(new_arg, old_arg);
4639 args[i] = new_arg;
4640 } else {
4641 FPRegister old_arg = FPRegister(args[i]);
4642 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
4643 Fmov(new_arg, old_arg);
4644 args[i] = new_arg;
4645 }
4646 }
4647 }
4648
4649 // Do a second pass to move values into their final positions and perform any
4650 // conversions that may be required.
4651 for (int i = 0; i < arg_count; i++) {
4652 DCHECK(pcs[i].type() == args[i].type());
4653 if (pcs[i].IsRegister()) {
4654 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
4655 } else {
4656 DCHECK(pcs[i].IsFPRegister());
4657 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
4658 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
4659 } else {
4660 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
4661 }
4662 }
4663 }
4664
4665 // Load the format string into x0, as per the procedure-call standard.
4666 //
4667 // To make the code as portable as possible, the format string is encoded
4668 // directly in the instruction stream. It might be cleaner to encode it in a
4669 // literal pool, but since Printf is usually used for debugging, it is
4670 // beneficial for it to be minimally dependent on other features.
4671 Label format_address;
4672 Adr(x0, &format_address);
4673
4674 // Emit the format string directly in the instruction stream.
4675 { BlockPoolsScope scope(this);
4676 Label after_data;
4677 B(&after_data);
4678 Bind(&format_address);
4679 EmitStringData(format);
4680 Unreachable();
4681 Bind(&after_data);
4682 }
4683
4684 // We don't pass any arguments on the stack, but we still need to align the C
4685 // stack pointer to a 16-byte boundary for PCS compliance.
4686 if (!csp.Is(StackPointer())) {
4687 Bic(csp, StackPointer(), 0xf);
4688 }
4689
4690 CallPrintf(arg_count, pcs);
4691}
4692
4693
4694void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
4695 // A call to printf needs special handling for the simulator, since the system
4696 // printf function will use a different instruction set and the procedure-call
4697 // standard will not be compatible.
4698#ifdef USE_SIMULATOR
4699 { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
4700 hlt(kImmExceptionIsPrintf);
4701 dc32(arg_count); // kPrintfArgCountOffset
4702
4703 // Determine the argument pattern.
4704 uint32_t arg_pattern_list = 0;
4705 for (int i = 0; i < arg_count; i++) {
4706 uint32_t arg_pattern;
4707 if (args[i].IsRegister()) {
4708 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
4709 } else {
4710 DCHECK(args[i].Is64Bits());
4711 arg_pattern = kPrintfArgD;
4712 }
4713 DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
4714 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
4715 }
4716 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
4717 }
4718#else
4719 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
4720#endif
4721}
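
// The simulator branch above encodes the call as a pseudo-instruction: an hlt
// followed by the argument count and a packed list of 2-bit argument
// patterns. A minimal sketch of the packing loop, for illustration only; the
// pattern width is an assumption.
#if 0  // Illustrative sketch; not part of the build.
#include <cstdint>

static uint32_t PackPrintfArgPatternsSketch(const uint32_t* patterns,
                                            int arg_count) {
  const int kPrintfArgPatternBits = 2;  // assumed width per argument
  uint32_t arg_pattern_list = 0;
  for (int i = 0; i < arg_count; i++) {
    arg_pattern_list |= patterns[i] << (kPrintfArgPatternBits * i);
  }
  return arg_pattern_list;  // value emitted at kPrintfArgPatternListOffset
}
#endif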
4722
4723
void MacroAssembler::Printf(const char* format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
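      // Push xzr alongside so that the amount pushed stays a multiple of 16
      // bytes, keeping csp correctly aligned when it is the stack pointer.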
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler* assm,
                                         Code* stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
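  // Load the stub's entry point from the 64-bit literal emitted by dc64()
  // below, put the address of this sequence in x0, then branch to the stub.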
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}


bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}

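// TruncatingDiv computes result = trunc(dividend / divisor) without a divide
// instruction, using the multiply-high technique from Hacker's Delight:
// multiply by a magic constant, apply the sign corrections, arithmetic-shift,
// and finally add the dividend's sign bit. As an illustrative sketch, for
// divisor == 7 the helper is expected to produce multiplier 0x92492493 and
// shift 2.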
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
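  // Signed multiply-high: result = (dividend * multiplier) >> 32.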
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
}


#undef __

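// UseScratchRegisterScope hands out temporaries from the assembler's TmpList()
// and FPTmpList() pools and returns them when the scope is destroyed. A
// typical use, as in Printf() above, is:
//   { UseScratchRegisterScope temps(this);
//     Register tmp = temps.AcquireX();
//     ...
//   }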
UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
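    // For example, if the smi check on x1 was emitted five instructions before
    // this data, the payload is RegisterBits::encode(1) | DeltaBits::encode(5);
    // the InlineSmiCheckInfo constructor below decodes it again.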
    uint32_t delta =
        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      uint32_t payload32 = static_cast<uint32_t>(payload);
      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
      int smi_check_delta = DeltaBits::decode(payload32);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64