// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"

#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"

namespace v8 {
namespace internal {

// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __


MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
                               unsigned buffer_size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}


CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}


CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(csp, temp);
        AssertStackConsistency();
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions_);
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  DCHECK(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on AArch64 can be produced using an initial value and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.
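  //
  // Some single-instruction cases, for illustration (not exhaustive):
  //   Mov(x0, 0x0000cafe00000000);  // movz x0, #0xcafe, lsl #32
  //   Mov(x0, 0xffffffffffff1234);  // movn x0, #0xedcb
  //   Mov(x0, 0x00ff00ff00ff00ff);  // orr x0, xzr, #0x00ff00ff00ff00ff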

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    // Generic immediate case. Imm will be represented by
    // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
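    // For example (illustrative): 0xffffffff00001234 contains two 0xffff
    // halfwords but only one 0x0000 halfword, so it is built with
    // movn + movk (two instructions) rather than movz + movk + movk (three).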
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    DCHECK((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    DCHECK(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
      AssertStackConsistency();
    }
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    DCHECK(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  DCHECK(allow_macro_instructions_);

  if (operand.NeedsRelocation(this)) {
    Ldr(rd, operand.immediate());
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.ImmediateValue());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    mvn(rd, operand);
  }
}


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  DCHECK((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());
  DCHECK((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
    int64_t imm = operand.ImmediateValue();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, imm);
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move not instruction. Movn can't
    // write to the stack pointer.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}


Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm) {
  int reg_size = dst.SizeInBits();

  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    int64_t imm_low = imm >> shift_low;
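    // For example (illustrative): 0x1234000 is neither an add/sub nor a
    // logical immediate, but shifting out its 14 trailing zeros leaves 0x48d,
    // which a single movz can materialize; the pre-shift is then undone by
    // applying the returned operand with LSL #14.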

    // Pre-shift the immediate to the most-significant bits of the register. We
    // insert set bits in the least-significant bits, as this creates a
    // different immediate that may be encodable using movn or orr-immediate.
    // If this new immediate is encodable, the set bits will be eliminated by
    // the post shift on the following instruction.
    int shift_high = CountLeadingZeros(imm, reg_size);
    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);

    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      // The new immediate has been moved into the destination's low bits:
      // return a new leftward-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      // The new immediate has been moved into the destination's high bits:
      // return a new rightward-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      // Use the generic move operation to set up the immediate.
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);
    DCHECK(is_uintn(operand.shift_amount(),
                    rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                                       : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
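  // For example (illustrative): a 64-bit ldr with offset #32760 fits the
  // scaled unsigned-immediate form, whereas #32768 does not (and is also
  // outside the unscaled [-256, 255] range), so the offset is materialized
  // in a scratch register first.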
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}

void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}


void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8()) {
    Ldrsb(rt, addr);
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);
  } else if (r.IsInteger32()) {
    Ldr(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    Ldr(rt, addr);
  }
}


void MacroAssembler::Store(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
  DCHECK(!r.IsDouble());

  if (r.IsInteger8() || r.IsUInteger8()) {
    Strb(rt, addr);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    Strh(rt, addr);
  } else if (r.IsInteger32()) {
    Str(rt.W(), addr);
  } else {
    DCHECK(rt.Is64Bits());
    if (r.IsHeapObject()) {
      AssertNotSmi(rt);
    } else if (r.IsSmi()) {
      AssertSmi(rt);
    }
    Str(rt, addr);
  }
}


bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label *label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //  - The label is bound but too far away.
  //  - The label is not bound but linked, and the previous branch
  //    instruction in the chain is too far away.
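  // For reference, the architectural branch ranges are approximately +/-32KB
  // for test-bit branches (tbz/tbnz), +/-1MB for compare-and-branch and
  // conditional branches, and +/-128MB for unconditional branches.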
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
        !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
        Min(next_veneer_pool_check_,
            max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}


void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!rd.IsZero());

  if (hint == kAdrNear) {
    adr(rd, label);
    return;
  }

  DCHECK(hint == kAdrFar);
  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      adr(rd, label);
    } else {
      DCHECK(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);
    }
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.AcquireX();

    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    adr(rd, label);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
      nop(ADR_FAR_NOP);
    }
    movz(scratch, 0);
  }
}


void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always: B(label); break;
      case never: break;
      case reg_zero: Cbz(reg, label); break;
      case reg_not_zero: Cbnz(reg, label); break;
      case reg_bit_clear: Tbz(reg, bit, label); break;
      case reg_bit_set: Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}


void MacroAssembler::B(Label* label, Condition cond) {
  DCHECK(allow_macro_instructions_);
  DCHECK((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
    b(&done, NegateCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}


// Pseudo-instructions.


void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  DCHECK(allow_macro_instructions_);
  DCHECK(AreSameSizeAndType(rd, rm));

  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}


// Abstracted stack operations.


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(count, size, dst0, dst1, dst2, dst3);
  PopPostamble(count, size);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3,
                         const CPURegister& dst4, const CPURegister& dst5,
                         const CPURegister& dst6, const CPURegister& dst7) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
  DCHECK(dst0.IsValid());

  int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(4, size, dst0, dst1, dst2, dst3);
  PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
  PopPostamble(count, size);
}


void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
  int size = src0.SizeInBytes() + src1.SizeInBytes();

  PushPreamble(size);
  // Reserve room for src0 and push src1.
  str(src1, MemOperand(StackPointer(), -size, PreIndex));
  // Fill the gap with src0.
  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
}


void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);
  }

  size_t count = queued_.size();
  size_t index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  size_t count = queued_.size();
  size_t index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  masm_->PopPostamble(size_);
  queued_.clear();
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PushPreamble(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer
  // is csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  // Pop up to four registers at a time because if the current stack pointer is
  // csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
  PopPostamble(registers.Count(), size);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  DCHECK(count == 0);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
  DCHECK(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
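  // For example (illustrative), Push(x0, x1) becomes a single
  // "stp x1, x0, [StackPointer(), #-16]!", leaving x0 at the higher address
  // as if it had been pushed first.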
  switch (count) {
    case 1:
      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      DCHECK(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      DCHECK(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  DCHECK(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
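  // For example (illustrative), Pop(x0, x1) becomes a single
  // "ldp x0, x1, [StackPointer()], #16", loading x0 from the lower address,
  // i.e. as if it were popped first.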
  switch (count) {
    case 1:
      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      DCHECK(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      DCHECK(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PushPreamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}


void MacroAssembler::PopPostamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    SyncSystemStackPointer();
  }
}


void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  DCHECK(AreSameSizeAndType(src1, src2));
  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  DCHECK(AreSameSizeAndType(dst1, dst2));
  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);  // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  DCHECK(csp.Is(StackPointer()));

  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);  // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}


void MacroAssembler::AssertStackConsistency() {
  // Avoid emitting code when !use_real_abort() since non-real aborts cause too
  // much code to be generated.
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer())) {
      // Always check the alignment of csp when it is the current stack
      // pointer. We can't check the alignment of csp without using a scratch
      // register (or clobbering the flags), but the processor (or simulator)
      // will abort if it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    }
    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);  // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);  // Ok if csp < StackPointer().

      // Avoid generating AssertStackConsistency checks for the Push in Abort.
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        // Restore StackPointer().
        sub(StackPointer(), csp, StackPointer());
        Abort(kTheCurrentStackPointerIsBelowCsp);
      }

      bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}

void MacroAssembler::AssertCspAligned() {
  if (emit_debug_code() && use_real_aborts()) {
    // TODO(titzer): use a real assert for alignment check?
    UseScratchRegisterScope scope(this);
    Register temp = scope.AcquireX();
    ldr(temp, MemOperand(csp));
  }
}

void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();
      Mrs(fpcr, FPCR);
    }

    // Settings left to their default values:
    //   - Assert that flush-to-zero is not set.
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    //   - Assert that the rounding mode is nearest-with-ties-to-even.
    STATIC_ASSERT(FPTieEven == 0);
    Tst(fpcr, RMode_mask);
    B(eq, &done);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);

    Bind(&done);
  }
}


void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  AssertFPCRState();

  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
  // become quiet NaNs. We use fsub rather than fadd because fsub preserves
  // -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  Fsub(dst, src, fp_zero);
}


void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);
  SmiTag(dst, dst);
}


void MacroAssembler::LoadAccessor(Register dst, Register holder,
                                  int accessor_index,
                                  AccessorComponent accessor) {
  Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
  LoadInstanceDescriptors(dst, dst);
  Ldr(dst,
      FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
                                           : AccessorPair::kSetterOffset;
  Ldr(dst, FieldMemOperand(dst, offset));
}


void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
                                    Register scratch1, Register scratch2,
                                    Register scratch3, Register scratch4,
                                    Label* call_runtime) {
  DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;
  Register null_value = scratch4;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  LoadRoot(null_value, Heap::kNullValueRootIndex);
  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  Label map_check;
  Label top_check;
  ExternalReference new_space_allocation_top_adr =
      ExternalReference::new_space_allocation_top_address(isolate());
  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
  const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;

  // Bail out if the object is not in new space.
  JumpIfNotInNewSpace(receiver, no_memento_found);
  Add(scratch1, receiver, kMementoEndOffset);
  // If the object is in new space, we need to check whether it is on the same
  // page as the current top.
  Mov(scratch2, new_space_allocation_top_adr);
  Ldr(scratch2, MemOperand(scratch2));
  Eor(scratch2, scratch1, scratch2);
  Tst(scratch2, ~Page::kPageAlignmentMask);
  B(eq, &top_check);
  // The object is on a different page than allocation top. Bail out if the
  // object sits on the page boundary as no memento can follow and we cannot
  // touch the memory following it.
  Eor(scratch2, scratch1, receiver);
  Tst(scratch2, ~Page::kPageAlignmentMask);
  B(ne, no_memento_found);
  // Continue with the actual map check.
  jmp(&map_check);
  // If top is on the same page as the current object, we need to check whether
  // we are below top.
  bind(&top_check);
  Mov(scratch2, new_space_allocation_top_adr);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);
  // Memento map check.
  bind(&map_check);
  Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
  Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
}


void MacroAssembler::InNewSpace(Register object,
                                Condition cond,
                                Label* branch) {
  DCHECK(cond == eq || cond == ne);
  UseScratchRegisterScope temps(this);
  const int mask =
      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
  CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
}


void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, reason);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
    Check(ls, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAFunction);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
    Check(eq, kOperandIsNotAFunction);
  }
}


void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
    Check(eq, kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
    Check(eq, kOperandIsNotAGeneratorObject);
  }
}

void MacroAssembler::AssertReceiver(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
    Check(hs, kOperandIsNotAReceiver);
  }
}


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsASmiAndNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertPositiveOrZero(Register value) {
  if (emit_debug_code()) {
    Label done;
    int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
    Tbz(value, sign_bit, &done);
    Abort(kUnexpectedNegativeValue);
    Bind(&done);
  }
}

void MacroAssembler::AssertNotNumber(Register value) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    Tst(value, kSmiTagMask);
    Check(ne, kOperandIsANumber);
    Label done;
    JumpIfNotHeapNumber(value, &done);
    Abort(kOperandIsANumber);
    Bind(&done);
  }
}

void MacroAssembler::AssertNumber(Register value) {
  if (emit_debug_code()) {
    Label done;
    JumpIfSmi(value, &done);
    JumpIfHeapNumber(value, &done);
    Abort(kOperandIsNotANumber);
    Bind(&done);
  }
}

void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All arguments must be on the stack before this function is called.
  // x0 holds the return value after the call.

  // Check that the number of arguments matches what the function expects.
  // If f->nargs is -1, the function can accept a variable number of arguments.
1749 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1750
1751 // Place the necessary arguments.
1752 Mov(x0, num_arguments);
1753 Mov(x1, ExternalReference(f, isolate()));
1754
1755 CEntryStub stub(isolate(), 1, save_doubles);
1756 CallStub(&stub);
1757}
1758
1759
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001760void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1761 int num_arguments) {
1762 Mov(x0, num_arguments);
1763 Mov(x1, ext);
1764
1765 CEntryStub stub(isolate(), 1);
1766 CallStub(&stub);
1767}
1768
1769
1770void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1771 Mov(x1, builtin);
1772 CEntryStub stub(isolate(), 1);
1773 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1774}
1775
1776
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001777void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1778 const Runtime::Function* function = Runtime::FunctionForId(fid);
1779 DCHECK_EQ(1, function->result_size);
1780 if (function->nargs >= 0) {
1781 // TODO(1236192): Most runtime routines don't need the number of
1782 // arguments passed in because it is constant. At some point we
1783 // should remove this need and make the runtime routine entry code
1784 // smarter.
1785 Mov(x0, function->nargs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001786 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001787 JumpToExternalReference(ExternalReference(fid, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001788}
1789
1790
1791void MacroAssembler::InitializeNewString(Register string,
1792 Register length,
1793 Heap::RootListIndex map_index,
1794 Register scratch1,
1795 Register scratch2) {
1796 DCHECK(!AreAliased(string, length, scratch1, scratch2));
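  // Store the map, the smi-tagged length and an empty hash field into the
  // newly allocated string.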
1797 LoadRoot(scratch2, map_index);
1798 SmiTag(scratch1, length);
1799 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1800
1801 Mov(scratch2, String::kEmptyHashField);
1802 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1803 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1804}
1805
1806
1807int MacroAssembler::ActivationFrameAlignment() {
1808#if V8_HOST_ARCH_ARM64
1809 // Running on the real platform. Use the alignment as mandated by the local
1810 // environment.
1811 // Note: This will break if we ever start generating snapshots on one ARM
1812 // platform for another ARM platform with a different alignment.
1813 return base::OS::ActivationFrameAlignment();
1814#else // V8_HOST_ARCH_ARM64
1815 // If we are using the simulator then we should always align to the expected
1816 // alignment. As the simulator is used to generate snapshots we do not know
1817 // if the target platform will need alignment, so this is controlled from a
1818 // flag.
1819 return FLAG_sim_stack_alignment;
1820#endif // V8_HOST_ARCH_ARM64
1821}
1822
1823
1824void MacroAssembler::CallCFunction(ExternalReference function,
1825 int num_of_reg_args) {
1826 CallCFunction(function, num_of_reg_args, 0);
1827}
1828
1829
1830void MacroAssembler::CallCFunction(ExternalReference function,
1831 int num_of_reg_args,
1832 int num_of_double_args) {
1833 UseScratchRegisterScope temps(this);
1834 Register temp = temps.AcquireX();
1835 Mov(temp, function);
1836 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1837}
1838
1839
1840void MacroAssembler::CallCFunction(Register function,
1841 int num_of_reg_args,
1842 int num_of_double_args) {
1843 DCHECK(has_frame());
1844 // We can pass 8 integer arguments in registers. If we need to pass more than
1845 // that, we'll need to implement support for passing them on the stack.
1846 DCHECK(num_of_reg_args <= 8);
1847
1848 // If we're passing doubles, we're limited to the following prototypes
1849 // (defined by ExternalReference::Type):
1850 // BUILTIN_COMPARE_CALL: int f(double, double)
1851 // BUILTIN_FP_FP_CALL: double f(double, double)
1852 // BUILTIN_FP_CALL: double f(double)
1853 // BUILTIN_FP_INT_CALL: double f(double, int)
1854 if (num_of_double_args > 0) {
1855 DCHECK(num_of_reg_args <= 1);
1856 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1857 }
1858
1859
1860 // If the stack pointer is not csp, we need to derive an aligned csp from the
1861 // current stack pointer.
1862 const Register old_stack_pointer = StackPointer();
1863 if (!csp.Is(old_stack_pointer)) {
1864 AssertStackConsistency();
1865
1866 int sp_alignment = ActivationFrameAlignment();
1867 // The ABI mandates at least 16-byte alignment.
1868 DCHECK(sp_alignment >= 16);
1869 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1870
1871 // The current stack pointer is a callee saved register, and is preserved
1872 // across the call.
1873 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1874
1875 // Align and synchronize the system stack pointer with jssp.
1876 Bic(csp, old_stack_pointer, sp_alignment - 1);
1877 SetStackPointer(csp);
1878 }
1879
1880 // Call directly. The function called cannot cause a GC, or allow preemption,
1881 // so the return address in the link register stays correct.
1882 Call(function);
1883
1884 if (!csp.Is(old_stack_pointer)) {
1885 if (emit_debug_code()) {
1886 // Because the stack pointer must be aligned on a 16-byte boundary, the
1887 // aligned csp can be up to 12 bytes below the jssp. This is the case
1888 // where we only pushed one W register on top of an aligned jssp.
1889 UseScratchRegisterScope temps(this);
1890 Register temp = temps.AcquireX();
1891 DCHECK(ActivationFrameAlignment() == 16);
1892 Sub(temp, csp, old_stack_pointer);
1893 // We want temp <= 0 && temp >= -12.
1894 Cmp(temp, 0);
1895 Ccmp(temp, -12, NFlag, le);
1896 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1897 }
1898 SetStackPointer(old_stack_pointer);
1899 }
1900}
1901
1902
1903void MacroAssembler::Jump(Register target) {
1904 Br(target);
1905}
1906
1907
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001908void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
1909 Condition cond) {
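  // A branch that is never taken (nv) requires no code.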
1910 if (cond == nv) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001911 UseScratchRegisterScope temps(this);
1912 Register temp = temps.AcquireX();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001913 Label done;
1914 if (cond != al) B(NegateCondition(cond), &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915 Mov(temp, Operand(target, rmode));
1916 Br(temp);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001917 Bind(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001918}
1919
1920
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001921void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
1922 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001923 DCHECK(!RelocInfo::IsCodeTarget(rmode));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001924 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001925}
1926
1927
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001928void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1929 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001930 DCHECK(RelocInfo::IsCodeTarget(rmode));
1931 AllowDeferredHandleDereference embedding_raw_address;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001932 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001933}
1934
1935
1936void MacroAssembler::Call(Register target) {
1937 BlockPoolsScope scope(this);
1938#ifdef DEBUG
1939 Label start_call;
1940 Bind(&start_call);
1941#endif
1942
1943 Blr(target);
1944
1945#ifdef DEBUG
1946 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1947#endif
1948}
1949
1950
1951void MacroAssembler::Call(Label* target) {
1952 BlockPoolsScope scope(this);
1953#ifdef DEBUG
1954 Label start_call;
1955 Bind(&start_call);
1956#endif
1957
1958 Bl(target);
1959
1960#ifdef DEBUG
1961 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1962#endif
1963}
1964
1965
1966// MacroAssembler::CallSize is sensitive to changes in this function, as it
1967// requires to know how many instructions are used to branch to the target.
1968void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1969 BlockPoolsScope scope(this);
1970#ifdef DEBUG
1971 Label start_call;
1972 Bind(&start_call);
1973#endif
1974 // Statement positions are expected to be recorded when the target
1975 // address is loaded.
1976 positions_recorder()->WriteRecordedPositions();
1977
1978 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1979 DCHECK(rmode != RelocInfo::NONE32);
1980
1981 UseScratchRegisterScope temps(this);
1982 Register temp = temps.AcquireX();
1983
1984 if (rmode == RelocInfo::NONE64) {
1985 // Addresses are 48 bits so we never need to load the upper 16 bits.
1986 uint64_t imm = reinterpret_cast<uint64_t>(target);
1987 // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
1988 DCHECK(((imm >> 48) & 0xffff) == 0);
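    // Build the 48-bit address 16 bits at a time.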
1989 movz(temp, (imm >> 0) & 0xffff, 0);
1990 movk(temp, (imm >> 16) & 0xffff, 16);
1991 movk(temp, (imm >> 32) & 0xffff, 32);
1992 } else {
1993 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1994 }
1995 Blr(temp);
1996#ifdef DEBUG
1997 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1998#endif
1999}
2000
2001
2002void MacroAssembler::Call(Handle<Code> code,
2003 RelocInfo::Mode rmode,
2004 TypeFeedbackId ast_id) {
2005#ifdef DEBUG
2006 Label start_call;
2007 Bind(&start_call);
2008#endif
2009
2010 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
2011 SetRecordedAstId(ast_id);
2012 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2013 }
2014
2015 AllowDeferredHandleDereference embedding_raw_address;
2016 Call(reinterpret_cast<Address>(code.location()), rmode);
2017
2018#ifdef DEBUG
2019 // Check the size of the code generated.
2020 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
2021#endif
2022}
2023
2024
2025int MacroAssembler::CallSize(Register target) {
2026 USE(target);
2027 return kInstructionSize;
2028}
2029
2030
2031int MacroAssembler::CallSize(Label* target) {
2032 USE(target);
2033 return kInstructionSize;
2034}
2035
2036
2037int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2038 USE(target);
2039
2040 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2041 DCHECK(rmode != RelocInfo::NONE32);
2042
2043 if (rmode == RelocInfo::NONE64) {
2044 return kCallSizeWithoutRelocation;
2045 } else {
2046 return kCallSizeWithRelocation;
2047 }
2048}
2049
2050
2051int MacroAssembler::CallSize(Handle<Code> code,
2052 RelocInfo::Mode rmode,
2053 TypeFeedbackId ast_id) {
2054 USE(code);
2055 USE(ast_id);
2056
2057 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2058 DCHECK(rmode != RelocInfo::NONE32);
2059
2060 if (rmode == RelocInfo::NONE64) {
2061 return kCallSizeWithoutRelocation;
2062 } else {
2063 return kCallSizeWithRelocation;
2064 }
2065}
2066
2067
2068void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2069 SmiCheckType smi_check_type) {
2070 Label on_not_heap_number;
2071
2072 if (smi_check_type == DO_SMI_CHECK) {
2073 JumpIfSmi(object, &on_not_heap_number);
2074 }
2075
2076 AssertNotSmi(object);
2077
2078 UseScratchRegisterScope temps(this);
2079 Register temp = temps.AcquireX();
2080 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2081 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2082
2083 Bind(&on_not_heap_number);
2084}
2085
2086
2087void MacroAssembler::JumpIfNotHeapNumber(Register object,
2088 Label* on_not_heap_number,
2089 SmiCheckType smi_check_type) {
2090 if (smi_check_type == DO_SMI_CHECK) {
2091 JumpIfSmi(object, on_not_heap_number);
2092 }
2093
2094 AssertNotSmi(object);
2095
2096 UseScratchRegisterScope temps(this);
2097 Register temp = temps.AcquireX();
2098 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2099 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2100}
2101
2102
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002103void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2104 FPRegister value,
2105 FPRegister scratch_d,
2106 Label* on_successful_conversion,
2107 Label* on_failed_conversion) {
2108 // Convert to an int and back again, then compare with the original value.
2109 Fcvtzs(as_int, value);
2110 Scvtf(scratch_d, as_int);
2111 Fcmp(value, scratch_d);
2112
2113 if (on_successful_conversion) {
2114 B(on_successful_conversion, eq);
2115 }
2116 if (on_failed_conversion) {
2117 B(on_failed_conversion, ne);
2118 }
2119}
2120
2121
2122void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2123 UseScratchRegisterScope temps(this);
2124 Register temp = temps.AcquireX();
2125  // The bit pattern of floating-point -0.0 is INT64_MIN when read as an integer,
2126  // so subtracting 1 (cmp) will cause signed overflow.
2127 Fmov(temp, input);
2128 Cmp(temp, 1);
2129}
2130
2131
2132void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2133 Label* on_negative_zero) {
2134 TestForMinusZero(input);
2135 B(vs, on_negative_zero);
2136}
2137
2138
2139void MacroAssembler::JumpIfMinusZero(Register input,
2140 Label* on_negative_zero) {
2141 DCHECK(input.Is64Bits());
2142 // Floating point value is in an integer register. Detect -0.0 by subtracting
2143 // 1 (cmp), which will cause overflow.
2144 Cmp(input, 1);
2145 B(vs, on_negative_zero);
2146}
2147
2148
2149void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2150 // Clamp the value to [0..255].
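  // For example, an input of -5 clamps to 0 and an input of 300 clamps to 255;
  // values already in the range pass through unchanged.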
2151 Cmp(input.W(), Operand(input.W(), UXTB));
2152 // If input < input & 0xff, it must be < 0, so saturate to 0.
2153 Csel(output.W(), wzr, input.W(), lt);
2154 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2155 Csel(output.W(), output.W(), 255, le);
2156}
2157
2158
2159void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2160 ClampInt32ToUint8(in_out, in_out);
2161}
2162
2163
2164void MacroAssembler::ClampDoubleToUint8(Register output,
2165 DoubleRegister input,
2166 DoubleRegister dbl_scratch) {
2167 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2168 // - Inputs lower than 0 (including -infinity) produce 0.
2169 // - Inputs higher than 255 (including +infinity) produce 255.
2170 // Also, it seems that PIXEL types use round-to-nearest rather than
2171 // round-towards-zero.
2172
2173 // Squash +infinity before the conversion, since Fcvtnu will normally
2174 // convert it to 0.
2175 Fmov(dbl_scratch, 255);
2176 Fmin(dbl_scratch, dbl_scratch, input);
2177
2178 // Convert double to unsigned integer. Values less than zero become zero.
2179 // Values greater than 255 have already been clamped to 255.
2180 Fcvtnu(output, dbl_scratch);
2181}
2182
2183
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002184void MacroAssembler::CopyBytes(Register dst,
2185 Register src,
2186 Register length,
2187 Register scratch,
2188 CopyHint hint) {
2189 UseScratchRegisterScope temps(this);
2190 Register tmp1 = temps.AcquireX();
2191 Register tmp2 = temps.AcquireX();
2192 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2193 DCHECK(!AreAliased(src, dst, csp));
2194
2195 if (emit_debug_code()) {
2196 // Check copy length.
2197 Cmp(length, 0);
2198 Assert(ge, kUnexpectedNegativeValue);
2199
2200 // Check src and dst buffers don't overlap.
2201 Add(scratch, src, length); // Calculate end of src buffer.
2202 Cmp(scratch, dst);
2203 Add(scratch, dst, length); // Calculate end of dst buffer.
2204 Ccmp(scratch, src, ZFlag, gt);
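    // The buffers overlap only if src_end > dst and dst_end > src. When the
    // first comparison already rules this out, ZFlag makes the assert pass.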
2205 Assert(le, kCopyBuffersOverlap);
2206 }
2207
2208 Label short_copy, short_loop, bulk_loop, done;
2209
2210 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2211 Register bulk_length = scratch;
2212 int pair_size = 2 * kXRegSize;
2213 int pair_mask = pair_size - 1;
2214
2215 Bic(bulk_length, length, pair_mask);
2216 Cbz(bulk_length, &short_copy);
2217 Bind(&bulk_loop);
2218 Sub(bulk_length, bulk_length, pair_size);
2219 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2220 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2221 Cbnz(bulk_length, &bulk_loop);
2222
2223 And(length, length, pair_mask);
2224 }
2225
2226 Bind(&short_copy);
2227 Cbz(length, &done);
2228 Bind(&short_loop);
2229 Sub(length, length, 1);
2230 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2231 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2232 Cbnz(length, &short_loop);
2233
2234
2235 Bind(&done);
2236}
2237
2238
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002239void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2240 Register end_address,
2241 Register filler) {
2242 DCHECK(!current_address.Is(csp));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002243 UseScratchRegisterScope temps(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002244 Register distance_in_words = temps.AcquireX();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002245 Label done;
2246
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002247 // Calculate the distance. If it's <= zero then there's nothing to do.
2248 Subs(distance_in_words, end_address, current_address);
2249 B(le, &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002250
2251 // There's at least one field to fill, so do this unconditionally.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002252 Str(filler, MemOperand(current_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002253
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002254  // If distance_in_words consists of an odd number of words, advance
2255  // current_address by one word; otherwise the pairs loop would overwrite the
2256  // field that was stored above.
2257 And(distance_in_words, distance_in_words, kPointerSize);
2258 Add(current_address, current_address, distance_in_words);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002259
2260 // Store filler to memory in pairs.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002261 Label loop, entry;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002262 B(&entry);
2263 Bind(&loop);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002264 Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002265 Bind(&entry);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002266 Cmp(current_address, end_address);
2267 B(lo, &loop);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002268
2269 Bind(&done);
2270}
2271
2272
2273void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2274 Register first, Register second, Register scratch1, Register scratch2,
2275 Label* failure, SmiCheckType smi_check) {
2276 if (smi_check == DO_SMI_CHECK) {
2277 JumpIfEitherSmi(first, second, failure);
2278 } else if (emit_debug_code()) {
2279 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2280 Label not_smi;
2281 JumpIfEitherSmi(first, second, NULL, &not_smi);
2282
2283 // At least one input is a smi, but the flags indicated a smi check wasn't
2284 // needed.
2285 Abort(kUnexpectedSmi);
2286
2287 Bind(&not_smi);
2288 }
2289
2290 // Test that both first and second are sequential one-byte strings.
2291 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2292 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2293 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2294 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2295
2296 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2297 scratch2, failure);
2298}
2299
2300
2301void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2302 Register first, Register second, Register scratch1, Register scratch2,
2303 Label* failure) {
2304 DCHECK(!AreAliased(scratch1, second));
2305 DCHECK(!AreAliased(scratch1, scratch2));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002306 const int kFlatOneByteStringMask =
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002307 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002308 const int kFlatOneByteStringTag =
2309 kStringTag | kOneByteStringTag | kSeqStringTag;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002310 And(scratch1, first, kFlatOneByteStringMask);
2311 And(scratch2, second, kFlatOneByteStringMask);
2312 Cmp(scratch1, kFlatOneByteStringTag);
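  // Ccmp compares the second type only if the first comparison succeeded (eq);
  // otherwise it clears the flags so that the branch to failure is taken.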
2313 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2314 B(ne, failure);
2315}
2316
2317
2318void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2319 Register scratch,
2320 Label* failure) {
2321 const int kFlatOneByteStringMask =
2322 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2323 const int kFlatOneByteStringTag =
2324 kStringTag | kOneByteStringTag | kSeqStringTag;
2325 And(scratch, type, kFlatOneByteStringMask);
2326 Cmp(scratch, kFlatOneByteStringTag);
2327 B(ne, failure);
2328}
2329
2330
2331void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2332 Register first, Register second, Register scratch1, Register scratch2,
2333 Label* failure) {
2334 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2335 const int kFlatOneByteStringMask =
2336 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2337 const int kFlatOneByteStringTag =
2338 kStringTag | kOneByteStringTag | kSeqStringTag;
2339 And(scratch1, first, kFlatOneByteStringMask);
2340 And(scratch2, second, kFlatOneByteStringMask);
2341 Cmp(scratch1, kFlatOneByteStringTag);
2342 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2343 B(ne, failure);
2344}
2345
2346
2347void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2348 Label* not_unique_name) {
2349 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2350 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2351 // continue
2352 // } else {
2353 // goto not_unique_name
2354 // }
2355 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2356 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2357 B(ne, not_unique_name);
2358}
2359
Ben Murdochda12d292016-06-02 14:46:10 +01002360void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2361 Register caller_args_count_reg,
2362 Register scratch0, Register scratch1) {
2363#if DEBUG
2364 if (callee_args_count.is_reg()) {
2365 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2366 scratch1));
2367 } else {
2368 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2369 }
2370#endif
2371
2372  // Calculate the end of the destination area where we will put the arguments
2373  // after we drop the current frame. We add kPointerSize to account for the
2374  // receiver argument, which is not included in the formal parameter count.
2375 Register dst_reg = scratch0;
2376 __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
2377 __ add(dst_reg, dst_reg,
2378 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
2379
2380 Register src_reg = caller_args_count_reg;
2381 // Calculate the end of source area. +kPointerSize is for the receiver.
2382 if (callee_args_count.is_reg()) {
2383 add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
2384 add(src_reg, src_reg, Operand(kPointerSize));
2385 } else {
2386 add(src_reg, jssp,
2387 Operand((callee_args_count.immediate() + 1) * kPointerSize));
2388 }
2389
2390 if (FLAG_debug_code) {
2391 __ Cmp(src_reg, dst_reg);
2392 __ Check(lo, kStackAccessBelowStackPointer);
2393 }
2394
2395 // Restore caller's frame pointer and return address now as they will be
2396 // overwritten by the copying loop.
2397 __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
2398 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2399
2400  // Now copy the callee arguments to the caller frame, going backwards to
2401  // avoid corrupting them (the source and destination areas could overlap).
2402
2403 // Both src_reg and dst_reg are pointing to the word after the one to copy,
2404 // so they must be pre-decremented in the loop.
2405 Register tmp_reg = scratch1;
2406 Label loop, entry;
2407 __ B(&entry);
2408 __ bind(&loop);
2409 __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
2410 __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
2411 __ bind(&entry);
2412 __ Cmp(jssp, src_reg);
2413 __ B(ne, &loop);
2414
2415 // Leave current frame.
2416 __ Mov(jssp, dst_reg);
2417 __ SetStackPointer(jssp);
2418 __ AssertStackConsistency();
2419}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002420
2421void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2422 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002423 Label* done,
2424 InvokeFlag flag,
2425 bool* definitely_mismatches,
2426 const CallWrapper& call_wrapper) {
2427 bool definitely_matches = false;
2428 *definitely_mismatches = false;
2429 Label regular_invoke;
2430
2431  // Check whether the expected and actual argument counts match. If not,
2432  // set up the registers according to the contract with ArgumentsAdaptorTrampoline:
2433 // x0: actual arguments count.
2434 // x1: function (passed through to callee).
2435 // x2: expected arguments count.
2436
2437 // The code below is made a lot easier because the calling code already sets
2438 // up actual and expected registers according to the contract if values are
2439 // passed in registers.
2440 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2441 DCHECK(expected.is_immediate() || expected.reg().is(x2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002442
2443 if (expected.is_immediate()) {
2444 DCHECK(actual.is_immediate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002445 Mov(x0, actual.immediate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002446 if (expected.immediate() == actual.immediate()) {
2447 definitely_matches = true;
2448
2449 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002450 if (expected.immediate() ==
2451 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2452 // Don't worry about adapting arguments for builtins that
2453  // don't want that done. Skip the adaptation code by making it look
2454 // like we have a match between expected and actual number of
2455 // arguments.
2456 definitely_matches = true;
2457 } else {
2458 *definitely_mismatches = true;
2459 // Set up x2 for the argument adaptor.
2460 Mov(x2, expected.immediate());
2461 }
2462 }
2463
2464 } else { // expected is a register.
2465 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2466 : Operand(actual.reg());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002467 Mov(x0, actual_op);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002468 // If actual == expected perform a regular invocation.
2469 Cmp(expected.reg(), actual_op);
2470 B(eq, &regular_invoke);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002471 }
2472
2473 // If the argument counts may mismatch, generate a call to the argument
2474 // adaptor.
2475 if (!definitely_matches) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002476 Handle<Code> adaptor =
2477 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2478 if (flag == CALL_FUNCTION) {
2479 call_wrapper.BeforeCall(CallSize(adaptor));
2480 Call(adaptor);
2481 call_wrapper.AfterCall();
2482 if (!*definitely_mismatches) {
2483 // If the arg counts don't match, no extra code is emitted by
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002484 // MAsm::InvokeFunctionCode and we can just fall through.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002485 B(done);
2486 }
2487 } else {
2488 Jump(adaptor, RelocInfo::CODE_TARGET);
2489 }
2490 }
2491 Bind(&regular_invoke);
2492}
2493
2494
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002495void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
2496 const ParameterCount& expected,
2497 const ParameterCount& actual) {
2498 Label skip_flooding;
2499 ExternalReference step_in_enabled =
2500 ExternalReference::debug_step_in_enabled_address(isolate());
2501 Mov(x4, Operand(step_in_enabled));
2502 ldrb(x4, MemOperand(x4));
2503 CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
2504 {
2505 FrameScope frame(this,
2506 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2507 if (expected.is_reg()) {
2508 SmiTag(expected.reg());
2509 Push(expected.reg());
2510 }
2511 if (actual.is_reg()) {
2512 SmiTag(actual.reg());
2513 Push(actual.reg());
2514 }
2515 if (new_target.is_valid()) {
2516 Push(new_target);
2517 }
2518 Push(fun);
2519 Push(fun);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002520 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002521 Pop(fun);
2522 if (new_target.is_valid()) {
2523 Pop(new_target);
2524 }
2525 if (actual.is_reg()) {
2526 Pop(actual.reg());
2527 SmiUntag(actual.reg());
2528 }
2529 if (expected.is_reg()) {
2530 Pop(expected.reg());
2531 SmiUntag(expected.reg());
2532 }
2533 }
2534 bind(&skip_flooding);
2535}
2536
2537
2538void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2539 const ParameterCount& expected,
2540 const ParameterCount& actual,
2541 InvokeFlag flag,
2542 const CallWrapper& call_wrapper) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002543 // You can't call a function without a valid frame.
2544 DCHECK(flag == JUMP_FUNCTION || has_frame());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002545 DCHECK(function.is(x1));
2546 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2547
2548 FloodFunctionIfStepping(function, new_target, expected, actual);
2549
2550 // Clear the new.target register if not given.
2551 if (!new_target.is_valid()) {
2552 LoadRoot(x3, Heap::kUndefinedValueRootIndex);
2553 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002554
2555 Label done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002556 bool definitely_mismatches = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002557 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
2558 call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002559
2560 // If we are certain that actual != expected, then we know InvokePrologue will
2561 // have handled the call through the argument adaptor mechanism.
2562 // The called function expects the call kind in x5.
2563 if (!definitely_mismatches) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002564 // We call indirectly through the code field in the function to
2565 // allow recompilation to take effect without changing any of the
2566 // call sites.
2567 Register code = x4;
2568 Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002569 if (flag == CALL_FUNCTION) {
2570 call_wrapper.BeforeCall(CallSize(code));
2571 Call(code);
2572 call_wrapper.AfterCall();
2573 } else {
2574 DCHECK(flag == JUMP_FUNCTION);
2575 Jump(code);
2576 }
2577 }
2578
2579 // Continue here if InvokePrologue does handle the invocation due to
2580 // mismatched parameter counts.
2581 Bind(&done);
2582}
2583
2584
2585void MacroAssembler::InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002586 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002587 const ParameterCount& actual,
2588 InvokeFlag flag,
2589 const CallWrapper& call_wrapper) {
2590 // You can't call a function without a valid frame.
2591 DCHECK(flag == JUMP_FUNCTION || has_frame());
2592
2593 // Contract with called JS functions requires that function is passed in x1.
2594 // (See FullCodeGenerator::Generate().)
2595 DCHECK(function.is(x1));
2596
2597 Register expected_reg = x2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002598
2599 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2600 // The number of arguments is stored as an int32_t, and -1 is a marker
2601 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2602 // extension to correctly handle it.
2603 Ldr(expected_reg, FieldMemOperand(function,
2604 JSFunction::kSharedFunctionInfoOffset));
2605 Ldrsw(expected_reg,
2606 FieldMemOperand(expected_reg,
2607 SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002608
2609 ParameterCount expected(expected_reg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002610 InvokeFunctionCode(function, new_target, expected, actual, flag,
2611 call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002612}
2613
2614
2615void MacroAssembler::InvokeFunction(Register function,
2616 const ParameterCount& expected,
2617 const ParameterCount& actual,
2618 InvokeFlag flag,
2619 const CallWrapper& call_wrapper) {
2620 // You can't call a function without a valid frame.
2621 DCHECK(flag == JUMP_FUNCTION || has_frame());
2622
2623 // Contract with called JS functions requires that function is passed in x1.
2624 // (See FullCodeGenerator::Generate().)
2625 DCHECK(function.Is(x1));
2626
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002627 // Set up the context.
2628 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2629
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002630 InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002631}
2632
2633
2634void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2635 const ParameterCount& expected,
2636 const ParameterCount& actual,
2637 InvokeFlag flag,
2638 const CallWrapper& call_wrapper) {
2639 // Contract with called JS functions requires that function is passed in x1.
2640 // (See FullCodeGenerator::Generate().)
2641 __ LoadObject(x1, function);
2642 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2643}
2644
2645
2646void MacroAssembler::TryConvertDoubleToInt64(Register result,
2647 DoubleRegister double_input,
2648 Label* done) {
2649 // Try to convert with an FPU convert instruction. It's trivial to compute
2650 // the modulo operation on an integer register so we convert to a 64-bit
2651 // integer.
2652 //
2653  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2654  // when the double is out of range. NaNs are converted to 0 (as ECMA-262
2655  // requires); infinities saturate and are handled as out-of-range values.
2656 Fcvtzs(result.X(), double_input);
2657
2658 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2659 // representable using a double, so if the result is one of those then we know
2660  // that saturation occurred, and we need to handle the conversion manually.
2661 //
2662 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2663 // 1 will cause signed overflow.
2664 Cmp(result.X(), 1);
2665 Ccmp(result.X(), -1, VFlag, vc);
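  // vc (no signed overflow) means the result is neither INT64_MIN nor
  // INT64_MAX, so the conversion did not saturate.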
2666
2667 B(vc, done);
2668}
2669
2670
2671void MacroAssembler::TruncateDoubleToI(Register result,
2672 DoubleRegister double_input) {
2673 Label done;
2674
2675 // Try to convert the double to an int64. If successful, the bottom 32 bits
2676 // contain our truncated int32 result.
2677 TryConvertDoubleToInt64(result, double_input, &done);
2678
2679 const Register old_stack_pointer = StackPointer();
2680 if (csp.Is(old_stack_pointer)) {
2681    // This currently only happens in compiler unit tests. If it arises
2682    // during regular code generation, the DoubleToI stub should be updated to
2683 // cope with csp and have an extra parameter indicating which stack pointer
2684 // it should use.
2685 Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
2686 Mov(jssp, csp);
2687 SetStackPointer(jssp);
2688 }
2689
2690 // If we fell through then inline version didn't succeed - call stub instead.
2691 Push(lr, double_input);
2692
2693 DoubleToIStub stub(isolate(),
2694 jssp,
2695 result,
2696 0,
2697 true, // is_truncating
2698 true); // skip_fastpath
2699 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2700
2701 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2702 Pop(xzr, lr); // xzr to drop the double input on the stack.
2703
2704 if (csp.Is(old_stack_pointer)) {
2705 Mov(csp, jssp);
2706 SetStackPointer(csp);
2707 AssertStackConsistency();
2708 Pop(xzr, jssp);
2709 }
2710
2711 Bind(&done);
2712}
2713
2714
2715void MacroAssembler::TruncateHeapNumberToI(Register result,
2716 Register object) {
2717 Label done;
2718 DCHECK(!result.is(object));
2719 DCHECK(jssp.Is(StackPointer()));
2720
2721 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2722
2723 // Try to convert the double to an int64. If successful, the bottom 32 bits
2724 // contain our truncated int32 result.
2725 TryConvertDoubleToInt64(result, fp_scratch, &done);
2726
2727 // If we fell through then inline version didn't succeed - call stub instead.
2728 Push(lr);
2729 DoubleToIStub stub(isolate(),
2730 object,
2731 result,
2732 HeapNumber::kValueOffset - kHeapObjectTag,
2733 true, // is_truncating
2734 true); // skip_fastpath
2735 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2736 Pop(lr);
2737
2738 Bind(&done);
2739}
2740
Ben Murdochda12d292016-06-02 14:46:10 +01002741void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
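  // Build a typed stub frame: push lr and fp, point fp at the new frame, claim
  // the remaining frame slots and store the frame type marker in its slot.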
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002742 UseScratchRegisterScope temps(this);
Ben Murdochda12d292016-06-02 14:46:10 +01002743 frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002744 Register temp = temps.AcquireX();
Ben Murdochda12d292016-06-02 14:46:10 +01002745 Mov(temp, Smi::FromInt(type));
2746 Push(lr, fp);
2747 Mov(fp, StackPointer());
2748 Claim(frame_slots);
2749 str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002750}
2751
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002752void MacroAssembler::Prologue(bool code_pre_aging) {
2753 if (code_pre_aging) {
2754 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2755 __ EmitCodeAgeSequence(stub);
2756 } else {
2757 __ EmitFrameSetupForCodeAgePatching();
2758 }
2759}
2760
2761
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002762void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
2763 Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2764 Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
2765 Ldr(vector,
2766 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
2767}
2768
2769
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002770void MacroAssembler::EnterFrame(StackFrame::Type type,
2771 bool load_constant_pool_pointer_reg) {
2772 // Out-of-line constant pool not implemented on arm64.
2773 UNREACHABLE();
2774}
2775
2776
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002777void MacroAssembler::EnterFrame(StackFrame::Type type) {
2778 DCHECK(jssp.Is(StackPointer()));
2779 UseScratchRegisterScope temps(this);
2780 Register type_reg = temps.AcquireX();
2781 Register code_reg = temps.AcquireX();
2782
Ben Murdochda12d292016-06-02 14:46:10 +01002783 if (type == StackFrame::INTERNAL) {
2784 Mov(type_reg, Smi::FromInt(type));
2785 Push(lr, fp);
2786 Push(type_reg);
2787 Mov(code_reg, Operand(CodeObject()));
2788 Push(code_reg);
2789 Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
2790    // jssp[3] : lr
2791    // jssp[2] : fp
2792    // jssp[1] : type
2793    // jssp[0] : [code object]
2794 } else {
2795 Mov(type_reg, Smi::FromInt(type));
2796 Push(lr, fp);
2797 Push(type_reg);
2798 Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
2799 // jssp[2] : lr
2800 // jssp[1] : fp
2801 // jssp[0] : type
2802 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002803}
2804
2805
2806void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2807 DCHECK(jssp.Is(StackPointer()));
2808 // Drop the execution stack down to the frame pointer and restore
2809 // the caller frame pointer and return address.
2810 Mov(jssp, fp);
2811 AssertStackConsistency();
2812 Pop(fp, lr);
2813}
2814
2815
2816void MacroAssembler::ExitFramePreserveFPRegs() {
2817 PushCPURegList(kCallerSavedFP);
2818}
2819
2820
2821void MacroAssembler::ExitFrameRestoreFPRegs() {
2822 // Read the registers from the stack without popping them. The stack pointer
2823 // will be reset as part of the unwinding process.
2824 CPURegList saved_fp_regs = kCallerSavedFP;
2825 DCHECK(saved_fp_regs.Count() % 2 == 0);
2826
2827 int offset = ExitFrameConstants::kLastExitFrameField;
2828 while (!saved_fp_regs.IsEmpty()) {
2829 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2830 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2831 offset -= 2 * kDRegSize;
2832 Ldp(dst1, dst0, MemOperand(fp, offset));
2833 }
2834}
2835
2836
2837void MacroAssembler::EnterExitFrame(bool save_doubles,
2838 const Register& scratch,
2839 int extra_space) {
2840 DCHECK(jssp.Is(StackPointer()));
2841
2842 // Set up the new stack frame.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002843 Push(lr, fp);
2844 Mov(fp, StackPointer());
Ben Murdochda12d292016-06-02 14:46:10 +01002845 Mov(scratch, Smi::FromInt(StackFrame::EXIT));
2846 Push(scratch);
2847 Push(xzr);
2848 Mov(scratch, Operand(CodeObject()));
2849 Push(scratch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002850 // fp[8]: CallerPC (lr)
2851 // fp -> fp[0]: CallerFP (old fp)
Ben Murdochda12d292016-06-02 14:46:10 +01002852 // fp[-8]: STUB marker
2853 // fp[-16]: Space reserved for SPOffset.
2854 // jssp -> fp[-24]: CodeObject()
2855 STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002856 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2857 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
Ben Murdochda12d292016-06-02 14:46:10 +01002858 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
2859 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002860
2861 // Save the frame pointer and context pointer in the top frame.
2862 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2863 isolate())));
2864 Str(fp, MemOperand(scratch));
2865 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2866 isolate())));
2867 Str(cp, MemOperand(scratch));
2868
Ben Murdochda12d292016-06-02 14:46:10 +01002869 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002870 if (save_doubles) {
2871 ExitFramePreserveFPRegs();
2872 }
2873
2874 // Reserve space for the return address and for user requested memory.
2875 // We do this before aligning to make sure that we end up correctly
2876 // aligned with the minimum of wasted space.
2877 Claim(extra_space + 1, kXRegSize);
2878 // fp[8]: CallerPC (lr)
2879 // fp -> fp[0]: CallerFP (old fp)
Ben Murdochda12d292016-06-02 14:46:10 +01002880 // fp[-8]: STUB marker
2881 // fp[-16]: Space reserved for SPOffset.
2882 // fp[-24]: CodeObject()
2883 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002884 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2885 // jssp -> jssp[0]: Space reserved for the return address.
2886
2887 // Align and synchronize the system stack pointer with jssp.
2888 AlignAndSetCSPForFrame();
2889 DCHECK(csp.Is(StackPointer()));
2890
2891 // fp[8]: CallerPC (lr)
2892 // fp -> fp[0]: CallerFP (old fp)
Ben Murdochda12d292016-06-02 14:46:10 +01002893 // fp[-8]: STUB marker
2894 // fp[-16]: Space reserved for SPOffset.
2895 // fp[-24]: CodeObject()
2896 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002897 // csp[8]: Memory reserved for the caller if extra_space != 0.
2898 // Alignment padding, if necessary.
2899 // csp -> csp[0]: Space reserved for the return address.
2900
2901 // ExitFrame::GetStateForFramePointer expects to find the return address at
2902 // the memory address immediately below the pointer stored in SPOffset.
2903 // It is not safe to derive much else from SPOffset, because the size of the
2904 // padding can vary.
2905 Add(scratch, csp, kXRegSize);
2906 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2907}
2908
2909
2910// Leave the current exit frame.
2911void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2912 const Register& scratch,
2913 bool restore_context) {
2914 DCHECK(csp.Is(StackPointer()));
2915
2916 if (restore_doubles) {
2917 ExitFrameRestoreFPRegs();
2918 }
2919
2920 // Restore the context pointer from the top frame.
2921 if (restore_context) {
2922 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2923 isolate())));
2924 Ldr(cp, MemOperand(scratch));
2925 }
2926
2927 if (emit_debug_code()) {
2928 // Also emit debug code to clear the cp in the top frame.
2929 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2930 isolate())));
2931 Str(xzr, MemOperand(scratch));
2932 }
2933 // Clear the frame pointer from the top frame.
2934 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2935 isolate())));
2936 Str(xzr, MemOperand(scratch));
2937
2938 // Pop the exit frame.
2939 // fp[8]: CallerPC (lr)
2940 // fp -> fp[0]: CallerFP (old fp)
2941 // fp[...]: The rest of the frame.
2942 Mov(jssp, fp);
2943 SetStackPointer(jssp);
2944 AssertStackConsistency();
2945 Pop(fp, lr);
2946}
2947
2948
2949void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2950 Register scratch1, Register scratch2) {
2951 if (FLAG_native_code_counters && counter->Enabled()) {
2952 Mov(scratch1, value);
2953 Mov(scratch2, ExternalReference(counter));
2954 Str(scratch1, MemOperand(scratch2));
2955 }
2956}
2957
2958
2959void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2960 Register scratch1, Register scratch2) {
2961 DCHECK(value != 0);
2962 if (FLAG_native_code_counters && counter->Enabled()) {
2963 Mov(scratch2, ExternalReference(counter));
2964 Ldr(scratch1, MemOperand(scratch2));
2965 Add(scratch1, scratch1, value);
2966 Str(scratch1, MemOperand(scratch2));
2967 }
2968}
2969
2970
2971void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2972 Register scratch1, Register scratch2) {
2973 IncrementCounter(counter, -value, scratch1, scratch2);
2974}
2975
2976
2977void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2978 if (context_chain_length > 0) {
2979 // Move up the chain of contexts to the context containing the slot.
2980 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2981 for (int i = 1; i < context_chain_length; i++) {
2982 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2983 }
2984 } else {
2985 // Slot is in the current function context. Move it into the
2986 // destination register in case we store into it (the write barrier
2987 // cannot be allowed to destroy the context in cp).
2988 Mov(dst, cp);
2989 }
2990}
2991
2992
2993void MacroAssembler::DebugBreak() {
2994 Mov(x0, 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002995 Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002996 CEntryStub ces(isolate(), 1);
2997 DCHECK(AllowThisStubCall(&ces));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002998 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002999}
3000
3001
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003002void MacroAssembler::PushStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003003 DCHECK(jssp.Is(StackPointer()));
3004 // Adjust this code if the asserts don't hold.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003005 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003006 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003007
3008 // For the JSEntry handler, we must preserve the live registers x0-x4.
3009 // (See JSEntryStub::GenerateBody().)
3010
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003011 // Link the current handler as the next handler.
3012 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3013 Ldr(x10, MemOperand(x11));
3014 Push(x10);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003015
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003016 // Set this new handler as the current one.
3017 Str(jssp, MemOperand(x11));
3018}
3019
3020
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003021void MacroAssembler::PopStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003022 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3023 Pop(x10);
3024 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3025 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3026 Str(x10, MemOperand(x11));
3027}
3028
3029
3030void MacroAssembler::Allocate(int object_size,
3031 Register result,
3032 Register scratch1,
3033 Register scratch2,
3034 Label* gc_required,
3035 AllocationFlags flags) {
3036 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
Ben Murdochc5610432016-08-08 18:44:38 +01003037 DCHECK((flags & ALLOCATION_FOLDED) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003038 if (!FLAG_inline_new) {
3039 if (emit_debug_code()) {
3040 // Trash the registers to simulate an allocation failure.
3041 // We apply salt to the original zap value to easily spot the values.
3042 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3043 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3044 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3045 }
3046 B(gc_required);
3047 return;
3048 }
3049
3050 UseScratchRegisterScope temps(this);
3051 Register scratch3 = temps.AcquireX();
3052
3053 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3054 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3055
3056 // Make object size into bytes.
3057 if ((flags & SIZE_IN_WORDS) != 0) {
3058 object_size *= kPointerSize;
3059 }
3060 DCHECK(0 == (object_size & kObjectAlignmentMask));
3061
3062 // Check relative positions of allocation top and limit addresses.
3063 // The values must be adjacent in memory to allow the use of LDP.
3064 ExternalReference heap_allocation_top =
3065 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3066 ExternalReference heap_allocation_limit =
3067 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3068 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3069 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3070 DCHECK((limit - top) == kPointerSize);
3071
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003072 // Set up allocation top address and allocation limit registers.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003073 Register top_address = scratch1;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003074 Register alloc_limit = scratch2;
3075 Register result_end = scratch3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003076 Mov(top_address, Operand(heap_allocation_top));
3077
3078 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003079 // Load allocation top into result and allocation limit into alloc_limit.
3080 Ldp(result, alloc_limit, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003081 } else {
3082 if (emit_debug_code()) {
3083 // Assert that result actually contains top on entry.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003084 Ldr(alloc_limit, MemOperand(top_address));
3085 Cmp(result, alloc_limit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003086 Check(eq, kUnexpectedAllocationTop);
3087 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003088 // Load allocation limit. Result already contains allocation top.
3089 Ldr(alloc_limit, MemOperand(top_address, limit - top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003090 }
3091
3092 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3093 // the same alignment on ARM64.
3094 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3095
3096 // Calculate new top and bail out if new space is exhausted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003097 Adds(result_end, result, object_size);
Ben Murdochc5610432016-08-08 18:44:38 +01003098 Ccmp(result_end, alloc_limit, NoFlag, cc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003099 B(hi, gc_required);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003100
Ben Murdochc5610432016-08-08 18:44:38 +01003101 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3102 // The top pointer is not updated for allocation folding dominators.
3103 Str(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003104 }
Ben Murdochc5610432016-08-08 18:44:38 +01003105
3106 // Tag the object.
3107 ObjectTag(result, result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003108}
3109
3110
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003111void MacroAssembler::Allocate(Register object_size, Register result,
3112 Register result_end, Register scratch,
3113 Label* gc_required, AllocationFlags flags) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003114 if (!FLAG_inline_new) {
3115 if (emit_debug_code()) {
3116 // Trash the registers to simulate an allocation failure.
3117 // We apply salt to the original zap value to easily spot the values.
3118 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003119 Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
3120 Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003121 }
3122 B(gc_required);
3123 return;
3124 }
3125
3126 UseScratchRegisterScope temps(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003127 Register scratch2 = temps.AcquireX();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003128
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003129 // |object_size| and |result_end| may overlap, other registers must not.
3130 DCHECK(!AreAliased(object_size, result, scratch, scratch2));
3131 DCHECK(!AreAliased(result_end, result, scratch, scratch2));
3132 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3133 result_end.Is64Bits());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003134
3135 // Check relative positions of allocation top and limit addresses.
3136 // The values must be adjacent in memory to allow the use of LDP.
3137 ExternalReference heap_allocation_top =
3138 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3139 ExternalReference heap_allocation_limit =
3140 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3141 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3142 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3143 DCHECK((limit - top) == kPointerSize);
3144
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003145 // Set up allocation top address and allocation limit registers.
3146 Register top_address = scratch;
3147 Register alloc_limit = scratch2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003148 Mov(top_address, heap_allocation_top);
3149
3150 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003151 // Load allocation top into result and allocation limit into alloc_limit.
3152 Ldp(result, alloc_limit, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003153 } else {
3154 if (emit_debug_code()) {
3155 // Assert that result actually contains top on entry.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003156 Ldr(alloc_limit, MemOperand(top_address));
3157 Cmp(result, alloc_limit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003158 Check(eq, kUnexpectedAllocationTop);
3159 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003160 // Load allocation limit. Result already contains allocation top.
3161 Ldr(alloc_limit, MemOperand(top_address, limit - top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003162 }
3163
3164 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3165 // the same alignment on ARM64.
3166 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3167
3168 // Calculate new top and bail out if new space is exhausted.
3169 if ((flags & SIZE_IN_WORDS) != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003170 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003171 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003172 Adds(result_end, result, object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003173 }
3174
3175 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003176 Tst(result_end, kObjectAlignmentMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003177 Check(eq, kUnalignedAllocationInNewSpace);
3178 }
3179
Ben Murdochc5610432016-08-08 18:44:38 +01003180 Ccmp(result_end, alloc_limit, NoFlag, cc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003181 B(hi, gc_required);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003182
Ben Murdochc5610432016-08-08 18:44:38 +01003183 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3184 // The top pointer is not updated for allocation folding dominators.
3185 Str(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003186 }
Ben Murdochc5610432016-08-08 18:44:38 +01003187
3188 // Tag the object.
3189 ObjectTag(result, result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003190}
3191
Ben Murdochc5610432016-08-08 18:44:38 +01003192void MacroAssembler::FastAllocate(int object_size, Register result,
3193 Register scratch1, Register scratch2,
3194 AllocationFlags flags) {
3195 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3196
3197 DCHECK(!AreAliased(result, scratch1, scratch2));
3198 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3199
3200 // Make object size into bytes.
3201 if ((flags & SIZE_IN_WORDS) != 0) {
3202 object_size *= kPointerSize;
3203 }
3204 DCHECK(0 == (object_size & kObjectAlignmentMask));
3205
3206 ExternalReference heap_allocation_top =
3207 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3208
3209 // Set up allocation top address and allocation limit registers.
3210 Register top_address = scratch1;
3211 Register result_end = scratch2;
3212 Mov(top_address, Operand(heap_allocation_top));
3213 Ldr(result, MemOperand(top_address));
3214
3215 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3216 // the same alignment on ARM64.
3217 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3218
3219 // Calculate new top and write it back.
3220 Adds(result_end, result, object_size);
3221 Str(result_end, MemOperand(top_address));
3222
3223 ObjectTag(result, result);
3224}
3225
3226void MacroAssembler::FastAllocate(Register object_size, Register result,
3227 Register result_end, Register scratch,
3228 AllocationFlags flags) {
3229 // |object_size| and |result_end| may overlap, other registers must not.
3230 DCHECK(!AreAliased(object_size, result, scratch));
3231 DCHECK(!AreAliased(result_end, result, scratch));
3232 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3233 result_end.Is64Bits());
3234
3235 ExternalReference heap_allocation_top =
3236 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3237
3238 // Set up allocation top address and allocation limit registers.
3239 Register top_address = scratch;
3240 Mov(top_address, heap_allocation_top);
3241 Ldr(result, MemOperand(top_address));
3242
3243 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3244 // the same alignment on ARM64.
3245 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3246
3247 // Calculate new top and write it back.
3248 if ((flags & SIZE_IN_WORDS) != 0) {
3249 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
3250 } else {
3251 Adds(result_end, result, object_size);
3252 }
3253 Str(result_end, MemOperand(top_address));
3254
3255 if (emit_debug_code()) {
3256 Tst(result_end, kObjectAlignmentMask);
3257 Check(eq, kUnalignedAllocationInNewSpace);
3258 }
3259
3260 ObjectTag(result, result);
3261}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003262
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003263void MacroAssembler::AllocateTwoByteString(Register result,
3264 Register length,
3265 Register scratch1,
3266 Register scratch2,
3267 Register scratch3,
3268 Label* gc_required) {
3269 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3270 // Calculate the number of bytes needed for the characters in the string while
3271 // observing object alignment.
3272 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3273 Add(scratch1, length, length); // Length in bytes, not chars.
3274 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3275 Bic(scratch1, scratch1, kObjectAlignmentMask);
3276
3277 // Allocate two-byte string in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01003278 Allocate(scratch1, result, scratch2, scratch3, gc_required,
3279 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003280
3281 // Set the map, length and hash field.
3282 InitializeNewString(result,
3283 length,
3284 Heap::kStringMapRootIndex,
3285 scratch1,
3286 scratch2);
3287}
3288
3289
3290void MacroAssembler::AllocateOneByteString(Register result, Register length,
3291 Register scratch1, Register scratch2,
3292 Register scratch3,
3293 Label* gc_required) {
3294 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3295 // Calculate the number of bytes needed for the characters in the string while
3296 // observing object alignment.
3297 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3298 STATIC_ASSERT(kCharSize == 1);
3299 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3300 Bic(scratch1, scratch1, kObjectAlignmentMask);
3301
3302 // Allocate one-byte string in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01003303 Allocate(scratch1, result, scratch2, scratch3, gc_required,
3304 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003305
3306 // Set the map, length and hash field.
3307 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3308 scratch1, scratch2);
3309}
3310
3311
3312void MacroAssembler::AllocateTwoByteConsString(Register result,
3313 Register length,
3314 Register scratch1,
3315 Register scratch2,
3316 Label* gc_required) {
3317 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01003318 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003319
3320 InitializeNewString(result,
3321 length,
3322 Heap::kConsStringMapRootIndex,
3323 scratch1,
3324 scratch2);
3325}
3326
3327
3328void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3329 Register scratch1,
3330 Register scratch2,
3331 Label* gc_required) {
Ben Murdochc5610432016-08-08 18:44:38 +01003332 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3333 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003334
3335 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3336 scratch1, scratch2);
3337}
3338
3339
3340void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3341 Register length,
3342 Register scratch1,
3343 Register scratch2,
3344 Label* gc_required) {
3345 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3346 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01003347 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003348
3349 InitializeNewString(result,
3350 length,
3351 Heap::kSlicedStringMapRootIndex,
3352 scratch1,
3353 scratch2);
3354}
3355
3356
3357void MacroAssembler::AllocateOneByteSlicedString(Register result,
3358 Register length,
3359 Register scratch1,
3360 Register scratch2,
3361 Label* gc_required) {
3362 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3363 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01003364 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003365
3366 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3367 scratch1, scratch2);
3368}
3369
3370
3371// Allocates a heap number or jumps to the gc_required label if the young space
3372// is full and a scavenge is needed.
3373void MacroAssembler::AllocateHeapNumber(Register result,
3374 Label* gc_required,
3375 Register scratch1,
3376 Register scratch2,
3377 CPURegister value,
3378 CPURegister heap_number_map,
3379 MutableMode mode) {
3380 DCHECK(!value.IsValid() || value.Is64Bits());
3381 UseScratchRegisterScope temps(this);
3382
3383 // Allocate an object in the heap for the heap number and tag it as a heap
3384 // object.
3385 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3386 NO_ALLOCATION_FLAGS);
3387
3388 Heap::RootListIndex map_index = mode == MUTABLE
3389 ? Heap::kMutableHeapNumberMapRootIndex
3390 : Heap::kHeapNumberMapRootIndex;
3391
3392 // Prepare the heap number map.
3393 if (!heap_number_map.IsValid()) {
3394 // If we have a valid value register, use the same type of register to store
3395 // the map so we can use STP to store both in one instruction.
3396 if (value.IsValid() && value.IsFPRegister()) {
3397 heap_number_map = temps.AcquireD();
3398 } else {
3399 heap_number_map = scratch1;
3400 }
3401 LoadRoot(heap_number_map, map_index);
3402 }
3403 if (emit_debug_code()) {
3404 Register map;
3405 if (heap_number_map.IsFPRegister()) {
3406 map = scratch1;
3407 Fmov(map, DoubleRegister(heap_number_map));
3408 } else {
3409 map = Register(heap_number_map);
3410 }
3411 AssertRegisterIsRoot(map, map_index);
3412 }
3413
3414 // Store the heap number map and the value in the allocated object.
3415 if (value.IsSameSizeAndType(heap_number_map)) {
3416 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3417 HeapNumber::kValueOffset);
Ben Murdochc5610432016-08-08 18:44:38 +01003418 Stp(heap_number_map, value,
3419 FieldMemOperand(result, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003420 } else {
Ben Murdochc5610432016-08-08 18:44:38 +01003421 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003422 if (value.IsValid()) {
Ben Murdochc5610432016-08-08 18:44:38 +01003423 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003424 }
3425 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003426}
3427
3428
3429void MacroAssembler::JumpIfObjectType(Register object,
3430 Register map,
3431 Register type_reg,
3432 InstanceType type,
3433 Label* if_cond_pass,
3434 Condition cond) {
3435 CompareObjectType(object, map, type_reg, type);
3436 B(cond, if_cond_pass);
3437}
3438
3439
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003440void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3441 Register value, Register scratch1,
3442 Register scratch2, Label* gc_required) {
3443 DCHECK(!result.is(constructor));
3444 DCHECK(!result.is(scratch1));
3445 DCHECK(!result.is(scratch2));
3446 DCHECK(!result.is(value));
3447
3448 // Allocate JSValue in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01003449 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3450 NO_ALLOCATION_FLAGS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003451
3452 // Initialize the JSValue.
3453 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3454 Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3455 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3456 Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3457 Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3458 Str(value, FieldMemOperand(result, JSValue::kValueOffset));
3459 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3460}
3461
3462
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003463void MacroAssembler::JumpIfNotObjectType(Register object,
3464 Register map,
3465 Register type_reg,
3466 InstanceType type,
3467 Label* if_not_object) {
3468 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3469}
3470
3471
3472// Sets condition flags based on comparison, and returns type in type_reg.
3473void MacroAssembler::CompareObjectType(Register object,
3474 Register map,
3475 Register type_reg,
3476 InstanceType type) {
3477 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3478 CompareInstanceType(map, type_reg, type);
3479}
3480
3481
3482// Sets condition flags based on comparison, and returns type in type_reg.
3483void MacroAssembler::CompareInstanceType(Register map,
3484 Register type_reg,
3485 InstanceType type) {
3486 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3487 Cmp(type_reg, type);
3488}
3489
3490
3491void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3492 UseScratchRegisterScope temps(this);
3493 Register obj_map = temps.AcquireX();
3494 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3495 CompareRoot(obj_map, index);
3496}
3497
3498
3499void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3500 Handle<Map> map) {
3501 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3502 CompareMap(scratch, map);
3503}
3504
3505
3506void MacroAssembler::CompareMap(Register obj_map,
3507 Handle<Map> map) {
3508 Cmp(obj_map, Operand(map));
3509}
3510
3511
3512void MacroAssembler::CheckMap(Register obj,
3513 Register scratch,
3514 Handle<Map> map,
3515 Label* fail,
3516 SmiCheckType smi_check_type) {
3517 if (smi_check_type == DO_SMI_CHECK) {
3518 JumpIfSmi(obj, fail);
3519 }
3520
3521 CompareObjectMap(obj, scratch, map);
3522 B(ne, fail);
3523}
3524
3525
3526void MacroAssembler::CheckMap(Register obj,
3527 Register scratch,
3528 Heap::RootListIndex index,
3529 Label* fail,
3530 SmiCheckType smi_check_type) {
3531 if (smi_check_type == DO_SMI_CHECK) {
3532 JumpIfSmi(obj, fail);
3533 }
3534 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3535 JumpIfNotRoot(scratch, index, fail);
3536}
3537
3538
3539void MacroAssembler::CheckMap(Register obj_map,
3540 Handle<Map> map,
3541 Label* fail,
3542 SmiCheckType smi_check_type) {
3543 if (smi_check_type == DO_SMI_CHECK) {
3544 JumpIfSmi(obj_map, fail);
3545 }
3546
3547 CompareMap(obj_map, map);
3548 B(ne, fail);
3549}
3550
3551
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003552void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3553 Register scratch2, Handle<WeakCell> cell,
3554 Handle<Code> success,
3555 SmiCheckType smi_check_type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003556 Label fail;
3557 if (smi_check_type == DO_SMI_CHECK) {
3558 JumpIfSmi(obj, &fail);
3559 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003560 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3561 CmpWeakValue(scratch1, cell, scratch2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003562 B(ne, &fail);
3563 Jump(success, RelocInfo::CODE_TARGET);
3564 Bind(&fail);
3565}
3566
3567
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003568void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3569 Register scratch) {
3570 Mov(scratch, Operand(cell));
3571 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3572 Cmp(value, scratch);
3573}
3574
3575
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003576void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003577 Mov(value, Operand(cell));
3578 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003579}
3580
3581
3582void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3583 Label* miss) {
3584 GetWeakValue(value, cell);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003585 JumpIfSmi(value, miss);
3586}
3587
3588
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003589void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3590 UseScratchRegisterScope temps(this);
3591 Register temp = temps.AcquireX();
3592 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3593 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3594 Tst(temp, mask);
3595}
3596
3597
3598void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3599 // Load the map's "bit field 2".
3600 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3601 // Retrieve elements_kind from bit field 2.
3602 DecodeField<Map::ElementsKindBits>(result);
3603}
3604
3605
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003606void MacroAssembler::GetMapConstructor(Register result, Register map,
3607 Register temp, Register temp2) {
3608 Label done, loop;
3609 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3610 Bind(&loop);
3611 JumpIfSmi(result, &done);
3612 CompareObjectType(result, temp, temp2, MAP_TYPE);
3613 B(ne, &done);
3614 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3615 B(&loop);
3616 Bind(&done);
3617}
3618
3619
3620void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3621 Register scratch, Label* miss) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003622 DCHECK(!AreAliased(function, result, scratch));
3623
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003624 // Get the prototype or initial map from the function.
3625 Ldr(result,
3626 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3627
3628 // If the prototype or initial map is the hole, don't return it and simply
3629 // miss the cache instead. This will allow us to allocate a prototype object
3630 // on-demand in the runtime system.
3631 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3632
3633 // If the function does not have an initial map, we're done.
3634 Label done;
3635 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3636
3637 // Get the prototype from the initial map.
3638 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3639
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003640 // All done.
3641 Bind(&done);
3642}
3643
3644
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003645void MacroAssembler::PushRoot(Heap::RootListIndex index) {
3646 UseScratchRegisterScope temps(this);
3647 Register temp = temps.AcquireX();
3648 LoadRoot(temp, index);
3649 Push(temp);
3650}
3651
3652
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003653void MacroAssembler::CompareRoot(const Register& obj,
3654 Heap::RootListIndex index) {
3655 UseScratchRegisterScope temps(this);
3656 Register temp = temps.AcquireX();
3657 DCHECK(!AreAliased(obj, temp));
3658 LoadRoot(temp, index);
3659 Cmp(obj, temp);
3660}
3661
3662
3663void MacroAssembler::JumpIfRoot(const Register& obj,
3664 Heap::RootListIndex index,
3665 Label* if_equal) {
3666 CompareRoot(obj, index);
3667 B(eq, if_equal);
3668}
3669
3670
3671void MacroAssembler::JumpIfNotRoot(const Register& obj,
3672 Heap::RootListIndex index,
3673 Label* if_not_equal) {
3674 CompareRoot(obj, index);
3675 B(ne, if_not_equal);
3676}
3677
3678
3679void MacroAssembler::CompareAndSplit(const Register& lhs,
3680 const Operand& rhs,
3681 Condition cond,
3682 Label* if_true,
3683 Label* if_false,
3684 Label* fall_through) {
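  // Branch to if_true when the condition holds and to if_false otherwise,
  // omitting any branch whose target is the fall-through label so that no
  // redundant branches are emitted.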
3685 if ((if_true == if_false) && (if_false == fall_through)) {
3686 // Fall through.
3687 } else if (if_true == if_false) {
3688 B(if_true);
3689 } else if (if_false == fall_through) {
3690 CompareAndBranch(lhs, rhs, cond, if_true);
3691 } else if (if_true == fall_through) {
3692 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3693 } else {
3694 CompareAndBranch(lhs, rhs, cond, if_true);
3695 B(if_false);
3696 }
3697}
3698
3699
3700void MacroAssembler::TestAndSplit(const Register& reg,
3701 uint64_t bit_pattern,
3702 Label* if_all_clear,
3703 Label* if_any_set,
3704 Label* fall_through) {
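  // Branch to if_any_set when any bit in bit_pattern is set and to
  // if_all_clear otherwise, omitting branches whose target is the
  // fall-through label.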
3705 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3706 // Fall through.
3707 } else if (if_all_clear == if_any_set) {
3708 B(if_all_clear);
3709 } else if (if_all_clear == fall_through) {
3710 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3711 } else if (if_any_set == fall_through) {
3712 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3713 } else {
3714 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3715 B(if_all_clear);
3716 }
3717}
3718
3719
3720void MacroAssembler::CheckFastElements(Register map,
3721 Register scratch,
3722 Label* fail) {
3723 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3724 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3725 STATIC_ASSERT(FAST_ELEMENTS == 2);
3726 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3727 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3728 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3729 B(hi, fail);
3730}
3731
3732
3733void MacroAssembler::CheckFastObjectElements(Register map,
3734 Register scratch,
3735 Label* fail) {
3736 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3737 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3738 STATIC_ASSERT(FAST_ELEMENTS == 2);
3739 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3740 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3741 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3742 // If the previous check found a smi-only kind (ls), force the flags to hi so the branch below fails; otherwise compare against the holey element maximum.
3743 Ccmp(scratch,
3744 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3745 B(hi, fail);
3746}
3747
3748
3749// Note: The ARM version of this clobbers elements_reg, but this version does
3750// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3751void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3752 Register key_reg,
3753 Register elements_reg,
3754 Register scratch1,
3755 FPRegister fpscratch1,
3756 Label* fail,
3757 int elements_offset) {
3758 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3759 Label store_num;
3760
3761 // Speculatively convert the smi to a double - all smis can be exactly
3762 // represented as a double.
3763 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3764
3765 // If value_reg is a smi, we're done.
3766 JumpIfSmi(value_reg, &store_num);
3767
3768 // Ensure that the object is a heap number.
3769 JumpIfNotHeapNumber(value_reg, fail);
3770
3771 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3772
3773 // Canonicalize NaNs.
3774 CanonicalizeNaN(fpscratch1);
3775
3776 // Store the result.
3777 Bind(&store_num);
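  // Compute the slot address as elements + untagged(key) * kDoubleSize; the
  // array header offset and heap object tag are folded into the store below.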
3778 Add(scratch1, elements_reg,
3779 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3780 Str(fpscratch1,
3781 FieldMemOperand(scratch1,
3782 FixedDoubleArray::kHeaderSize - elements_offset));
3783}
3784
3785
3786bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3787 return has_frame_ || !stub->SometimesSetsUpAFrame();
3788}
3789
3790
3791void MacroAssembler::IndexFromHash(Register hash, Register index) {
3792 // If the hash field contains an array index, pick it out. The assert checks
3793 // that the constants for the maximum number of digits for an array index
3794 // cached in the hash field and the number of bits reserved for it do not
3795 // conflict.
3796 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
3797 (1 << String::kArrayIndexValueBits));
3798 DecodeField<String::ArrayIndexValueBits>(index, hash);
3799 SmiTag(index, index);
3800}
3801
3802
3803void MacroAssembler::EmitSeqStringSetCharCheck(
3804 Register string,
3805 Register index,
3806 SeqStringSetCharCheckIndexType index_type,
3807 Register scratch,
3808 uint32_t encoding_mask) {
3809 DCHECK(!AreAliased(string, index, scratch));
3810
3811 if (index_type == kIndexIsSmi) {
3812 AssertSmi(index);
3813 }
3814
3815 // Check that string is an object.
3816 AssertNotSmi(string, kNonObject);
3817
3818 // Check that string has an appropriate map.
3819 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3820 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3821
3822 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3823 Cmp(scratch, encoding_mask);
3824 Check(eq, kUnexpectedStringType);
3825
3826 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3827 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3828 Check(lt, kIndexIsTooLarge);
3829
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003830 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003831 Cmp(index, 0);
3832 Check(ge, kIndexIsNegative);
3833}
3834
3835
3836void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3837 Register scratch1,
3838 Register scratch2,
3839 Label* miss) {
3840 DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
3841 Label same_contexts;
3842
Ben Murdochda12d292016-06-02 14:46:10 +01003843 // Load current lexical context from the active StandardFrame, which
3844 // may require crawling past STUB frames.
3845 Label load_context;
3846 Label has_context;
3847 Mov(scratch2, fp);
3848 bind(&load_context);
3849 Ldr(scratch1,
3850 MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
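  // A StandardFrame keeps a context (a heap object) in this slot, whereas a
  // STUB frame keeps a smi frame-type marker, so a non-smi value means the
  // lexical context has been found.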
3851 JumpIfNotSmi(scratch1, &has_context);
3852 Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
3853 B(&load_context);
3854 bind(&has_context);
3855
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003856 // In debug mode, make sure the lexical context is set.
3857#ifdef DEBUG
3858 Cmp(scratch1, 0);
3859 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3860#endif
3861
3862 // Load the native context of the current context.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003863 Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003864
3865 // Check the context is a native context.
3866 if (emit_debug_code()) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003867 // Read the first word and compare to the native_context_map.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003868 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
3869 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
3870 Check(eq, kExpectedNativeContext);
3871 }
3872
3873 // Check if both contexts are the same.
3874 Ldr(scratch2, FieldMemOperand(holder_reg,
3875 JSGlobalProxy::kNativeContextOffset));
3876 Cmp(scratch1, scratch2);
3877 B(&same_contexts, eq);
3878
3879 // Check the context is a native context.
3880 if (emit_debug_code()) {
3881 // We're short on scratch registers here, so use holder_reg as a scratch.
3882 Push(holder_reg);
3883 Register scratch3 = holder_reg;
3884
3885 CompareRoot(scratch2, Heap::kNullValueRootIndex);
3886 Check(ne, kExpectedNonNullContext);
3887
3888 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
3889 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
3890 Check(eq, kExpectedNativeContext);
3891 Pop(holder_reg);
3892 }
3893
3894 // Check that the security token in the calling global object is
3895 // compatible with the security token in the receiving global
3896 // object.
3897 int token_offset = Context::kHeaderSize +
3898 Context::SECURITY_TOKEN_INDEX * kPointerSize;
3899
3900 Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
3901 Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
3902 Cmp(scratch1, scratch2);
3903 B(miss, ne);
3904
3905 Bind(&same_contexts);
3906}
3907
3908
3909// Compute the hash code from the untagged key. This must be kept in sync with
3910// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3911// code-stub-hydrogen.cc
3912void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3913 DCHECK(!AreAliased(key, scratch));
3914
3915 // Xor original key with a seed.
3916 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3917 Eor(key, key, Operand::UntagSmi(scratch));
3918
3919 // The algorithm uses 32-bit integer values.
3920 key = key.W();
3921 scratch = scratch.W();
3922
3923 // Compute the hash code from the untagged key. This must be kept in sync
3924 // with ComputeIntegerHash in utils.h.
3925 //
3926 // hash = ~hash + (hash << 15);
3927 Mvn(scratch, key);
3928 Add(key, scratch, Operand(key, LSL, 15));
3929 // hash = hash ^ (hash >> 12);
3930 Eor(key, key, Operand(key, LSR, 12));
3931 // hash = hash + (hash << 2);
3932 Add(key, key, Operand(key, LSL, 2));
3933 // hash = hash ^ (hash >> 4);
3934 Eor(key, key, Operand(key, LSR, 4));
3935 // hash = hash * 2057;
3936 Mov(scratch, Operand(key, LSL, 11));
3937 Add(key, key, Operand(key, LSL, 3));
3938 Add(key, key, scratch);
3939 // hash = hash ^ (hash >> 16);
3940 Eor(key, key, Operand(key, LSR, 16));
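  // Clear the two top bits so the hash is a positive value of at most 30 bits.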
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003941 Bic(key, key, Operand(0xc0000000u));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003942}
3943
3944
3945void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3946 Register elements,
3947 Register key,
3948 Register result,
3949 Register scratch0,
3950 Register scratch1,
3951 Register scratch2,
3952 Register scratch3) {
3953 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
3954
3955 Label done;
3956
3957 SmiUntag(scratch0, key);
3958 GetNumberHash(scratch0, scratch1);
3959
3960 // Compute the capacity mask.
3961 Ldrsw(scratch1,
3962 UntagSmiFieldMemOperand(elements,
3963 SeededNumberDictionary::kCapacityOffset));
3964 Sub(scratch1, scratch1, 1);
3965
3966 // Generate an unrolled loop that performs a few probes before giving up.
3967 for (int i = 0; i < kNumberDictionaryProbes; i++) {
3968 // Compute the masked index: (hash + i + i * i) & mask.
3969 if (i > 0) {
3970 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
3971 } else {
3972 Mov(scratch2, scratch0);
3973 }
3974 And(scratch2, scratch2, scratch1);
3975
3976 // Scale the index by multiplying by the element size.
3977 DCHECK(SeededNumberDictionary::kEntrySize == 3);
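  // scratch2 *= 3 (the entry size), computed as scratch2 + (scratch2 << 1).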
3978 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3979
3980 // Check if the key is identical to the name.
3981 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3982 Ldr(scratch3,
3983 FieldMemOperand(scratch2,
3984 SeededNumberDictionary::kElementsStartOffset));
3985 Cmp(key, scratch3);
3986 if (i != (kNumberDictionaryProbes - 1)) {
3987 B(eq, &done);
3988 } else {
3989 B(ne, miss);
3990 }
3991 }
3992
3993 Bind(&done);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003994 // Check that the value is a field property.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003995 const int kDetailsOffset =
3996 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3997 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003998 DCHECK_EQ(DATA, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003999 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
4000
4001 // Get the value at the masked, scaled index and return.
4002 const int kValueOffset =
4003 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4004 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
4005}
4006
Ben Murdoch097c5b22016-05-18 11:27:45 +01004007void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
4008 Register code_entry,
4009 Register scratch) {
4010 const int offset = JSFunction::kCodeEntryOffset;
4011
4012 // Since a code entry (value) is always in old space, we don't need to update
4013 // remembered set. If incremental marking is off, there is nothing for us to
4014 // do.
4015 if (!FLAG_incremental_marking) return;
4016
4017 DCHECK(js_function.is(x1));
4018 DCHECK(code_entry.is(x7));
4019 DCHECK(scratch.is(x5));
4020 AssertNotSmi(js_function);
4021
4022 if (emit_debug_code()) {
4023 UseScratchRegisterScope temps(this);
4024 Register temp = temps.AcquireX();
4025 Add(scratch, js_function, offset - kHeapObjectTag);
4026 Ldr(temp, MemOperand(scratch));
4027 Cmp(temp, code_entry);
4028 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4029 }
4030
4031 // First, check if a write barrier is even needed. The tests below
4032 // catch stores of Smis and stores into young gen.
4033 Label done;
4034
4035 CheckPageFlagClear(code_entry, scratch,
4036 MemoryChunk::kPointersToHereAreInterestingMask, &done);
4037 CheckPageFlagClear(js_function, scratch,
4038 MemoryChunk::kPointersFromHereAreInterestingMask, &done);
4039
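  // Compute the address of the code entry slot; this is the slot address that
  // the write barrier records.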
4040 const Register dst = scratch;
4041 Add(dst, js_function, offset - kHeapObjectTag);
4042
4043 // Save caller-saved registers.Both input registers (x1 and x7) are caller
4044 // saved, so there is no need to push them.
4045 PushCPURegList(kCallerSaved);
4046
4047 int argument_count = 3;
4048
4049 Mov(x0, js_function);
4050 Mov(x1, dst);
4051 Mov(x2, ExternalReference::isolate_address(isolate()));
4052
4053 {
4054 AllowExternalCallThatCantCauseGC scope(this);
4055 CallCFunction(
4056 ExternalReference::incremental_marking_record_write_code_entry_function(
4057 isolate()),
4058 argument_count);
4059 }
4060
4061 // Restore caller-saved registers.
4062 PopCPURegList(kCallerSaved);
4063
4064 Bind(&done);
4065}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004066
4067void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
4068 Register address,
4069 Register scratch1,
4070 SaveFPRegsMode fp_mode,
4071 RememberedSetFinalAction and_then) {
4072 DCHECK(!AreAliased(object, address, scratch1));
4073 Label done, store_buffer_overflow;
4074 if (emit_debug_code()) {
4075 Label ok;
4076 JumpIfNotInNewSpace(object, &ok);
4077 Abort(kRememberedSetPointerInNewSpace);
4078 bind(&ok);
4079 }
4080 UseScratchRegisterScope temps(this);
4081 Register scratch2 = temps.AcquireX();
4082
4083 // Load store buffer top.
4084 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
4085 Ldr(scratch1, MemOperand(scratch2));
4086 // Store pointer to buffer and increment buffer top.
4087 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
4088 // Write back new top of buffer.
4089 Str(scratch1, MemOperand(scratch2));
4090 // Check for end of buffer: if the new top has all mask bits clear, the
4091 // buffer is full and the overflow stub must be called.
Ben Murdochda12d292016-06-02 14:46:10 +01004092 Tst(scratch1, StoreBuffer::kStoreBufferMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004093 if (and_then == kFallThroughAtEnd) {
Ben Murdochda12d292016-06-02 14:46:10 +01004094 B(ne, &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004095 } else {
4096 DCHECK(and_then == kReturnAtEnd);
Ben Murdochda12d292016-06-02 14:46:10 +01004097 B(eq, &store_buffer_overflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004098 Ret();
4099 }
4100
4101 Bind(&store_buffer_overflow);
4102 Push(lr);
4103 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
4104 CallStub(&store_buffer_overflow_stub);
4105 Pop(lr);
4106
4107 Bind(&done);
4108 if (and_then == kReturnAtEnd) {
4109 Ret();
4110 }
4111}
4112
4113
4114void MacroAssembler::PopSafepointRegisters() {
4115 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4116 PopXRegList(kSafepointSavedRegisters);
4117 Drop(num_unsaved);
4118}
4119
4120
4121void MacroAssembler::PushSafepointRegisters() {
4122 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
4123 // adjust the stack for unsaved registers.
4124 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4125 DCHECK(num_unsaved >= 0);
4126 Claim(num_unsaved);
4127 PushXRegList(kSafepointSavedRegisters);
4128}
4129
4130
4131void MacroAssembler::PushSafepointRegistersAndDoubles() {
4132 PushSafepointRegisters();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004133 PushCPURegList(CPURegList(
4134 CPURegister::kFPRegister, kDRegSizeInBits,
4135 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
4136 ->allocatable_double_codes_mask()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004137}
4138
4139
4140void MacroAssembler::PopSafepointRegistersAndDoubles() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004141 PopCPURegList(CPURegList(
4142 CPURegister::kFPRegister, kDRegSizeInBits,
4143 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
4144 ->allocatable_double_codes_mask()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004145 PopSafepointRegisters();
4146}
4147
4148
4149int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4150 // Make sure the safepoint registers list is what we expect.
4151 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4152
4153 // Safepoint registers are stored contiguously on the stack, but not all the
4154 // registers are saved. The following registers are excluded:
4155 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
4156 // the macro assembler.
4157 // - x28 (jssp) because JS stack pointer doesn't need to be included in
4158 // safepoint registers.
4159 // - x31 (csp) because the system stack pointer doesn't need to be included
4160 // in safepoint registers.
4161 //
4162 // This function implements the mapping of register code to index into the
4163 // safepoint register slots.
4164 if ((reg_code >= 0) && (reg_code <= 15)) {
4165 return reg_code;
4166 } else if ((reg_code >= 18) && (reg_code <= 27)) {
4167 // Skip ip0 and ip1.
4168 return reg_code - 2;
4169 } else if ((reg_code == 29) || (reg_code == 30)) {
4170 // Also skip jssp.
4171 return reg_code - 3;
4172 } else {
4173 // This register has no safepoint register slot.
4174 UNREACHABLE();
4175 return -1;
4176 }
4177}
4178
Ben Murdoch097c5b22016-05-18 11:27:45 +01004179void MacroAssembler::CheckPageFlag(const Register& object,
4180 const Register& scratch, int mask,
4181 Condition cc, Label* condition_met) {
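  // Clear the low bits of the object address to get the MemoryChunk header of
  // its page, which holds the flags word.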
4182 And(scratch, object, ~Page::kPageAlignmentMask);
4183 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4184 if (cc == eq) {
4185 TestAndBranchIfAnySet(scratch, mask, condition_met);
4186 } else {
4187 TestAndBranchIfAllClear(scratch, mask, condition_met);
4188 }
4189}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004190
4191void MacroAssembler::CheckPageFlagSet(const Register& object,
4192 const Register& scratch,
4193 int mask,
4194 Label* if_any_set) {
4195 And(scratch, object, ~Page::kPageAlignmentMask);
4196 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4197 TestAndBranchIfAnySet(scratch, mask, if_any_set);
4198}
4199
4200
4201void MacroAssembler::CheckPageFlagClear(const Register& object,
4202 const Register& scratch,
4203 int mask,
4204 Label* if_all_clear) {
4205 And(scratch, object, ~Page::kPageAlignmentMask);
4206 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4207 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
4208}
4209
4210
4211void MacroAssembler::RecordWriteField(
4212 Register object,
4213 int offset,
4214 Register value,
4215 Register scratch,
4216 LinkRegisterStatus lr_status,
4217 SaveFPRegsMode save_fp,
4218 RememberedSetAction remembered_set_action,
4219 SmiCheck smi_check,
4220 PointersToHereCheck pointers_to_here_check_for_value) {
4221 // First, check if a write barrier is even needed. The tests below
4222 // catch stores of Smis.
4223 Label done;
4224
4225 // Skip the barrier if writing a smi.
4226 if (smi_check == INLINE_SMI_CHECK) {
4227 JumpIfSmi(value, &done);
4228 }
4229
4230 // Although the object register is tagged, the offset is relative to the start
4231 // of the object, so offset must be a multiple of kPointerSize.
4232 DCHECK(IsAligned(offset, kPointerSize));
4233
4234 Add(scratch, object, offset - kHeapObjectTag);
4235 if (emit_debug_code()) {
4236 Label ok;
4237 Tst(scratch, (1 << kPointerSizeLog2) - 1);
4238 B(eq, &ok);
4239 Abort(kUnalignedCellInWriteBarrier);
4240 Bind(&ok);
4241 }
4242
4243 RecordWrite(object,
4244 scratch,
4245 value,
4246 lr_status,
4247 save_fp,
4248 remembered_set_action,
4249 OMIT_SMI_CHECK,
4250 pointers_to_here_check_for_value);
4251
4252 Bind(&done);
4253
4254 // Clobber clobbered input registers when running with the debug-code flag
4255 // turned on to provoke errors.
4256 if (emit_debug_code()) {
4257 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
4258 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
4259 }
4260}
4261
4262
4263// Will clobber: object, map, dst.
4264// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4265void MacroAssembler::RecordWriteForMap(Register object,
4266 Register map,
4267 Register dst,
4268 LinkRegisterStatus lr_status,
4269 SaveFPRegsMode fp_mode) {
4270 ASM_LOCATION("MacroAssembler::RecordWrite");
4271 DCHECK(!AreAliased(object, map));
4272
4273 if (emit_debug_code()) {
4274 UseScratchRegisterScope temps(this);
4275 Register temp = temps.AcquireX();
4276
4277 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
4278 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4279 }
4280
4281 if (!FLAG_incremental_marking) {
4282 return;
4283 }
4284
4285 if (emit_debug_code()) {
4286 UseScratchRegisterScope temps(this);
4287 Register temp = temps.AcquireX();
4288
4289 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4290 Cmp(temp, map);
4291 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4292 }
4293
4294 // First, check if a write barrier is even needed. The tests below
4295 // catch stores of smis and stores into the young generation.
4296 Label done;
4297
4298 // A single check of the map's pages interesting flag suffices, since it is
4299 // only set during incremental collection, and then it's also guaranteed that
4300 // the from object's page's interesting flag is also set. This optimization
4301 // relies on the fact that maps can never be in new space.
4302 CheckPageFlagClear(map,
4303 map, // Used as scratch.
4304 MemoryChunk::kPointersToHereAreInterestingMask,
4305 &done);
4306
4307 // Record the actual write.
4308 if (lr_status == kLRHasNotBeenSaved) {
4309 Push(lr);
4310 }
4311 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
4312 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
4313 fp_mode);
4314 CallStub(&stub);
4315 if (lr_status == kLRHasNotBeenSaved) {
4316 Pop(lr);
4317 }
4318
4319 Bind(&done);
4320
4321 // Count number of write barriers in generated code.
4322 isolate()->counters()->write_barriers_static()->Increment();
4323 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
4324 dst);
4325
4326 // Clobber clobbered registers when running with the debug-code flag
4327 // turned on to provoke errors.
4328 if (emit_debug_code()) {
4329 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
4330 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
4331 }
4332}
4333
4334
4335// Will clobber: object, address, value.
4336// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4337//
4338// The register 'object' contains a heap object pointer. The heap object tag is
4339// shifted away.
4340void MacroAssembler::RecordWrite(
4341 Register object,
4342 Register address,
4343 Register value,
4344 LinkRegisterStatus lr_status,
4345 SaveFPRegsMode fp_mode,
4346 RememberedSetAction remembered_set_action,
4347 SmiCheck smi_check,
4348 PointersToHereCheck pointers_to_here_check_for_value) {
4349 ASM_LOCATION("MacroAssembler::RecordWrite");
4350 DCHECK(!AreAliased(object, value));
4351
4352 if (emit_debug_code()) {
4353 UseScratchRegisterScope temps(this);
4354 Register temp = temps.AcquireX();
4355
4356 Ldr(temp, MemOperand(address));
4357 Cmp(temp, value);
4358 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4359 }
4360
4361 // First, check if a write barrier is even needed. The tests below
4362 // catch stores of smis and stores into the young generation.
4363 Label done;
4364
4365 if (smi_check == INLINE_SMI_CHECK) {
4366 DCHECK_EQ(0, kSmiTag);
4367 JumpIfSmi(value, &done);
4368 }
4369
4370 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
4371 CheckPageFlagClear(value,
4372 value, // Used as scratch.
4373 MemoryChunk::kPointersToHereAreInterestingMask,
4374 &done);
4375 }
4376 CheckPageFlagClear(object,
4377 value, // Used as scratch.
4378 MemoryChunk::kPointersFromHereAreInterestingMask,
4379 &done);
4380
4381 // Record the actual write.
4382 if (lr_status == kLRHasNotBeenSaved) {
4383 Push(lr);
4384 }
4385 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
4386 fp_mode);
4387 CallStub(&stub);
4388 if (lr_status == kLRHasNotBeenSaved) {
4389 Pop(lr);
4390 }
4391
4392 Bind(&done);
4393
4394 // Count number of write barriers in generated code.
4395 isolate()->counters()->write_barriers_static()->Increment();
4396 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
4397 value);
4398
4399 // Clobber clobbered registers when running with the debug-code flag
4400 // turned on to provoke errors.
4401 if (emit_debug_code()) {
4402 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
4403 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
4404 }
4405}
4406
4407
4408void MacroAssembler::AssertHasValidColor(const Register& reg) {
4409 if (emit_debug_code()) {
4410 // The bit sequence is backward. The first character in the string
4411 // represents the least significant bit.
4412 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4413
4414 Label color_is_valid;
4415 Tbnz(reg, 0, &color_is_valid);
4416 Tbz(reg, 1, &color_is_valid);
4417 Abort(kUnexpectedColorFound);
4418 Bind(&color_is_valid);
4419 }
4420}
4421
4422
4423void MacroAssembler::GetMarkBits(Register addr_reg,
4424 Register bitmap_reg,
4425 Register shift_reg) {
4426 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4427 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4428 // addr_reg is divided into fields:
4429 // |63 page base 20|19 high 8|7 shift 3|2 0|
4430 // 'high' gives the index of the cell holding color bits for the object.
4431 // 'shift' gives the offset in the cell for this object's color.
4432 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4433 UseScratchRegisterScope temps(this);
4434 Register temp = temps.AcquireX();
4435 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4436 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4437 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4438 // bitmap_reg:
4439 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4440 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4441}
4442
4443
4444void MacroAssembler::HasColor(Register object,
4445 Register bitmap_scratch,
4446 Register shift_scratch,
4447 Label* has_color,
4448 int first_bit,
4449 int second_bit) {
4450 // See mark-compact.h for color definitions.
4451 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
4452
4453 GetMarkBits(object, bitmap_scratch, shift_scratch);
4454 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4455 // Shift the bitmap down to get the color of the object in bits [1:0].
4456 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4457
4458 AssertHasValidColor(bitmap_scratch);
4459
4460 // These bit sequences are backwards. The first character in the string
4461 // represents the least significant bit.
4462 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004463 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4464 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004465
4466 // Check for the color.
4467 if (first_bit == 0) {
4468 // Checking for white.
4469 DCHECK(second_bit == 0);
4470 // We only need to test the first bit.
4471 Tbz(bitmap_scratch, 0, has_color);
4472 } else {
4473 Label other_color;
4474 // Checking for grey or black.
4475 Tbz(bitmap_scratch, 0, &other_color);
4476 if (second_bit == 0) {
4477 Tbz(bitmap_scratch, 1, has_color);
4478 } else {
4479 Tbnz(bitmap_scratch, 1, has_color);
4480 }
4481 Bind(&other_color);
4482 }
4483
4484 // Fall through if it does not have the right color.
4485}
4486
4487
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004488void MacroAssembler::JumpIfBlack(Register object,
4489 Register scratch0,
4490 Register scratch1,
4491 Label* on_black) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004492 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4493 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004494}
4495
4496
4497void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4498 Register object,
4499 Register scratch0,
4500 Register scratch1,
4501 Label* found) {
4502 DCHECK(!AreAliased(object, scratch0, scratch1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004503 Register current = scratch0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004504 Label loop_again, end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004505
4506 // Scratch contains elements pointer.
4507 Mov(current, object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004508 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4509 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4510 CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004511
4512 // Loop based on the map going up the prototype chain.
4513 Bind(&loop_again);
4514 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004515 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4516 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4517 CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
4518 B(lo, found);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004519 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4520 DecodeField<Map::ElementsKindBits>(scratch1);
4521 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4522 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004523 CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
4524
4525 Bind(&end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004526}
4527
4528
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004529void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
4530 Register shift_scratch, Register load_scratch,
4531 Register length_scratch,
4532 Label* value_is_white) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004533 DCHECK(!AreAliased(
4534 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4535
4536 // These bit sequences are backwards. The first character in the string
4537 // represents the least significant bit.
4538 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004539 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4540 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004541
4542 GetMarkBits(value, bitmap_scratch, shift_scratch);
4543 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4544 Lsr(load_scratch, load_scratch, shift_scratch);
4545
4546 AssertHasValidColor(load_scratch);
4547
4548 // If the value is black or grey we don't need to do anything.
4549 // Since both black and grey have a 1 in the first position and white does
4550 // not have a 1 there we only need to check one bit.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004551 Tbz(load_scratch, 0, value_is_white);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004552}
4553
4554
4555void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4556 if (emit_debug_code()) {
4557 Check(cond, reason);
4558 }
4559}
4560
4561
4562
4563void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4564 if (emit_debug_code()) {
4565 CheckRegisterIsClear(reg, reason);
4566 }
4567}
4568
4569
4570void MacroAssembler::AssertRegisterIsRoot(Register reg,
4571 Heap::RootListIndex index,
4572 BailoutReason reason) {
4573 if (emit_debug_code()) {
4574 CompareRoot(reg, index);
4575 Check(eq, reason);
4576 }
4577}
4578
4579
4580void MacroAssembler::AssertFastElements(Register elements) {
4581 if (emit_debug_code()) {
4582 UseScratchRegisterScope temps(this);
4583 Register temp = temps.AcquireX();
4584 Label ok;
4585 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4586 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4587 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4588 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4589 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4590 Bind(&ok);
4591 }
4592}
4593
4594
4595void MacroAssembler::AssertIsString(const Register& object) {
4596 if (emit_debug_code()) {
4597 UseScratchRegisterScope temps(this);
4598 Register temp = temps.AcquireX();
4599 STATIC_ASSERT(kSmiTag == 0);
4600 Tst(object, kSmiTagMask);
4601 Check(ne, kOperandIsNotAString);
4602 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4603 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
4604 Check(lo, kOperandIsNotAString);
4605 }
4606}
4607
4608
4609void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4610 Label ok;
4611 B(cond, &ok);
4612 Abort(reason);
4613 // Will not return here.
4614 Bind(&ok);
4615}
4616
4617
4618void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4619 Label ok;
4620 Cbz(reg, &ok);
4621 Abort(reason);
4622 // Will not return here.
4623 Bind(&ok);
4624}
4625
4626
void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we
  // have some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort);
    } else {
      CallRuntime(Runtime::kAbort);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}


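// If map_in_out holds the native context's cached array map for expected_kind,
// replace it with the cached map for transitioned_kind; otherwise branch to
// no_map_match and leave map_in_out untouched.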
void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, NativeContextMemOperand());
  Ldr(scratch2,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  Ldr(map_in_out,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  Ldr(dst, NativeContextMemOperand());
  Ldr(dst, ContextMemOperand(dst, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}


// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard
  // registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);  // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}


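// On the simulator, CallPrintf does not emit a real call. It emits a
// pseudo-instruction that the simulator intercepts: an hlt with
// kImmExceptionIsPrintf, followed by two data words giving the argument count
// and a packed argument-pattern list (kPrintfArgPatternBits per argument,
// describing whether each argument is a W, X or D register).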
void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the
  // system printf function will use a different instruction set and the
  // procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


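// Unlike PrintfNoPreserve, Printf saves and restores all caller-saved
// registers (and NZCV) around the call, so it can be dropped into generated
// code without disturbing the surrounding register state.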
void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
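      // xzr is pushed with tmp to keep the stack pointer 16-byte aligned, as
      // required when csp is the stack pointer.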
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


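// Emits the "young" code age sequence: a standard frame prologue (claim four
// slots, save x1, cp, fp and lr, and point fp at the saved fp/lr pair). The
// code ageing mechanism can later patch this sequence with the one emitted by
// EmitCodeAgeSequence below, which branches to the code age stub.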
void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age
  // sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}


bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}


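// Computes result = dividend / divisor, truncated towards zero, without a
// division instruction: multiply by a "magic number" and shift, as computed by
// base::SignedDivisionByConstant. As an illustration (assuming the usual magic
// constants for divisor == 3: multiplier 0x55555556, shift 0):
//   dividend = 7:  the high 32 bits of 7 * 0x55555556 are 2, and
//                  (dividend LSR 31) == 0, so result == 2 == 7 / 3.
//   dividend = -7: the high 32 bits are -3 (rounded towards -infinity), and
//                  (dividend LSR 31) == 1, so result == -2 == -7 / 3.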
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
}


#undef __


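// UseScratchRegisterScope hands out registers from the MacroAssembler's
// scratch lists and, on destruction, restores the lists to the state they had
// when the scope was created.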
UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
    uint32_t delta =
        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      uint32_t payload32 = static_cast<uint32_t>(payload);
      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
      int smi_check_delta = DeltaBits::decode(payload32);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64