1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_ARM64
6
7#include "src/base/bits.h"
8#include "src/base/division-by-constant.h"
9#include "src/bootstrapper.h"
10#include "src/codegen.h"
11#include "src/debug/debug.h"
12#include "src/register-configuration.h"
13#include "src/runtime/runtime.h"
14
15#include "src/arm64/frames-arm64.h"
16#include "src/arm64/macro-assembler-arm64.h"
17
18namespace v8 {
19namespace internal {
20
21// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
22#define __
23
24
25MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
26 unsigned buffer_size,
27 CodeObjectRequired create_code_object)
28 : Assembler(arg_isolate, buffer, buffer_size),
29 generating_stub_(false),
30#if DEBUG
31 allow_macro_instructions_(true),
32#endif
33 has_frame_(false),
34 use_real_aborts_(true),
35 sp_(jssp),
36 tmp_list_(DefaultTmpList()),
37 fptmp_list_(DefaultFPTmpList()) {
38 if (create_code_object == CodeObjectRequired::kYes) {
39 code_object_ =
40 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
41 }
42}
43
44
45CPURegList MacroAssembler::DefaultTmpList() {
46 return CPURegList(ip0, ip1);
47}
48
49
50CPURegList MacroAssembler::DefaultFPTmpList() {
51 return CPURegList(fp_scratch1, fp_scratch2);
52}
53
54
55void MacroAssembler::LogicalMacro(const Register& rd,
56 const Register& rn,
57 const Operand& operand,
58 LogicalOp op) {
59 UseScratchRegisterScope temps(this);
60
61 if (operand.NeedsRelocation(this)) {
62 Register temp = temps.AcquireX();
63 Ldr(temp, operand.immediate());
64 Logical(rd, rn, temp, op);
65
66 } else if (operand.IsImmediate()) {
67 int64_t immediate = operand.ImmediateValue();
68 unsigned reg_size = rd.SizeInBits();
69
70 // If the operation is NOT, invert the operation and immediate.
71 if ((op & NOT) == NOT) {
72 op = static_cast<LogicalOp>(op & ~NOT);
73 immediate = ~immediate;
74 }
75
76 // Ignore the top 32 bits of an immediate if we're moving to a W register.
77 if (rd.Is32Bits()) {
78 // Check that the top 32 bits are consistent.
79 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
80 ((immediate >> kWRegSizeInBits) == -1));
81 immediate &= kWRegMask;
82 }
83
84 DCHECK(rd.Is64Bits() || is_uint32(immediate));
85
86 // Special cases for all set or all clear immediates.
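    // As an illustrative example, And(x0, x1, 0) reduces to Mov(x0, 0) and
    // Orr(x0, x1, 0xffffffffffffffffUL) reduces to Mov(x0, -1), so no logical
    // instruction needs to be emitted at all for these immediates.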
87 if (immediate == 0) {
88 switch (op) {
89 case AND:
90 Mov(rd, 0);
91 return;
92 case ORR: // Fall through.
93 case EOR:
94 Mov(rd, rn);
95 return;
96 case ANDS: // Fall through.
97 case BICS:
98 break;
99 default:
100 UNREACHABLE();
101 }
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
104 switch (op) {
105 case AND:
106 Mov(rd, rn);
107 return;
108 case ORR:
109 Mov(rd, immediate);
110 return;
111 case EOR:
112 Mvn(rd, rn);
113 return;
114 case ANDS: // Fall through.
115 case BICS:
116 break;
117 default:
118 UNREACHABLE();
119 }
120 }
121
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
126 } else {
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = temps.AcquireSameSizeAs(rn);
129 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
130 if (rd.Is(csp)) {
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, imm_operand, op);
134 Mov(csp, temp);
135 AssertStackConsistency();
136 } else {
137 Logical(rd, rn, imm_operand, op);
138 }
139 }
140
141 } else if (operand.IsExtendedRegister()) {
142 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
143 // Add/sub extended supports shift <= 4. We want to support exactly the
144 // same modes here.
145 DCHECK(operand.shift_amount() <= 4);
146 DCHECK(operand.reg().Is64Bits() ||
147 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
148 Register temp = temps.AcquireSameSizeAs(rn);
149 EmitExtendShift(temp, operand.reg(), operand.extend(),
150 operand.shift_amount());
151 Logical(rd, rn, temp, op);
152
153 } else {
154 // The operand can be encoded in the instruction.
155 DCHECK(operand.IsShiftedRegister());
156 Logical(rd, rn, operand, op);
157 }
158}
159
160
161void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
162 DCHECK(allow_macro_instructions_);
163 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
164 DCHECK(!rd.IsZero());
165
166 // TODO(all) extend to support more immediates.
167 //
168 // Immediates on AArch64 can be produced using an initial value, and zero to
169 // three move-keep operations.
170 //
171 // Initial values can be generated with:
172 // 1. 64-bit move zero (movz).
173 // 2. 32-bit move inverted (movn).
174 // 3. 64-bit move inverted.
175 // 4. 32-bit orr immediate.
176 // 5. 64-bit orr immediate.
177 // Move-keep may then be used to modify each of the 16-bit half-words.
178 //
179 // The code below supports all five initial value generators, and
180 // applying move-keep operations to move-zero and move-inverted initial
181 // values.
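  //
  // As an illustrative sketch (values chosen for exposition only), a value
  // such as 0x0000cafe00005678 has two non-zero halfwords and could be built
  // with a move-zero initial value plus one move-keep:
  //   movz x0, #0x5678                // Halfword 0.
  //   movk x0, #0xcafe, lsl #32       // Halfword 2.
  // whereas the mostly-ones value 0xffffffffffff1234 is cheaper to start from
  // a move-inverted initial value:
  //   movn x0, #0xedcb                // ~0x000000000000edcb.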
182
183 // Try to move the immediate in one instruction, and if that fails, switch to
184 // using multiple instructions.
185 if (!TryOneInstrMoveImmediate(rd, imm)) {
186 unsigned reg_size = rd.SizeInBits();
187
188 // Generic immediate case. Imm will be represented by
189 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
190 // A move-zero or move-inverted is generated for the first non-zero or
191 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
192
193 uint64_t ignored_halfword = 0;
194 bool invert_move = false;
195 // If the number of 0xffff halfwords is greater than the number of 0x0000
196 // halfwords, it's more efficient to use move-inverted.
197 if (CountClearHalfWords(~imm, reg_size) >
198 CountClearHalfWords(imm, reg_size)) {
199 ignored_halfword = 0xffffL;
200 invert_move = true;
201 }
202
203 // Mov instructions can't move immediate values into the stack pointer, so
204 // set up a temporary register, if needed.
205 UseScratchRegisterScope temps(this);
206 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
207
208 // Iterate through the halfwords. Use movn/movz for the first non-ignored
209 // halfword, and movk for subsequent halfwords.
210 DCHECK((reg_size % 16) == 0);
211 bool first_mov_done = false;
212 for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
213 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
214 if (imm16 != ignored_halfword) {
215 if (!first_mov_done) {
216 if (invert_move) {
217 movn(temp, (~imm16) & 0xffffL, 16 * i);
218 } else {
219 movz(temp, imm16, 16 * i);
220 }
221 first_mov_done = true;
222 } else {
223 // Construct a wider constant.
224 movk(temp, imm16, 16 * i);
225 }
226 }
227 }
228 DCHECK(first_mov_done);
229
230 // Move the temporary if the original destination register was the stack
231 // pointer.
232 if (rd.IsSP()) {
233 mov(rd, temp);
234 AssertStackConsistency();
235 }
236 }
237}
238
239
240void MacroAssembler::Mov(const Register& rd,
241 const Operand& operand,
242 DiscardMoveMode discard_mode) {
243 DCHECK(allow_macro_instructions_);
244 DCHECK(!rd.IsZero());
245
246 // Provide a swap register for instructions that need to write into the
247 // system stack pointer (and can't do this inherently).
248 UseScratchRegisterScope temps(this);
249 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
250
251 if (operand.NeedsRelocation(this)) {
252 Ldr(dst, operand.immediate());
253
254 } else if (operand.IsImmediate()) {
255 // Call the macro assembler for generic immediates.
256 Mov(dst, operand.ImmediateValue());
257
258 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
259 // Emit a shift instruction if moving a shifted register. This operation
260 // could also be achieved using an orr instruction (like orn used by Mvn),
261 // but using a shift instruction makes the disassembly clearer.
262 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
263
264 } else if (operand.IsExtendedRegister()) {
265 // Emit an extend instruction if moving an extended register. This handles
266 // extend with post-shift operations, too.
267 EmitExtendShift(dst, operand.reg(), operand.extend(),
268 operand.shift_amount());
269
270 } else {
271 // Otherwise, emit a register move only if the registers are distinct, or
272 // if they are not X registers.
273 //
274 // Note that mov(w0, w0) is not a no-op because it clears the top word of
275 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
276 // registers is not required to clear the top word of the X register. In
277 // this case, the instruction is discarded.
278 //
279 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
280 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
281 (discard_mode == kDontDiscardForSameWReg))) {
282 Assembler::mov(rd, operand.reg());
283 }
284 // This case can handle writes into the system stack pointer directly.
285 dst = rd;
286 }
287
288 // Copy the result to the system stack pointer.
289 if (!dst.Is(rd)) {
290 DCHECK(rd.IsSP());
291 Assembler::mov(rd, dst);
292 }
293}
294
295
296void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
297 DCHECK(allow_macro_instructions_);
298
299 if (operand.NeedsRelocation(this)) {
300 Ldr(rd, operand.immediate());
301 mvn(rd, rd);
302
303 } else if (operand.IsImmediate()) {
304 // Call the macro assembler for generic immediates.
305 Mov(rd, ~operand.ImmediateValue());
306
307 } else if (operand.IsExtendedRegister()) {
308 // Emit two instructions for the extend case. This differs from Mov, as
309 // the extend and invert can't be achieved in one instruction.
310 EmitExtendShift(rd, operand.reg(), operand.extend(),
311 operand.shift_amount());
312 mvn(rd, rd);
313
314 } else {
315 mvn(rd, operand);
316 }
317}
318
319
320unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
321 DCHECK((reg_size % 8) == 0);
322 int count = 0;
323 for (unsigned i = 0; i < (reg_size / 16); i++) {
324 if ((imm & 0xffff) == 0) {
325 count++;
326 }
327 imm >>= 16;
328 }
329 return count;
330}
331
332
333// The movz instruction can generate immediates containing an arbitrary 16-bit
334// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
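// For instance (illustrative), 0xffff000000000000 is movz-encodable (a single
// non-zero half-word), while 0x0001000100000000 is not, because two of its
// half-words would need to be populated.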
335bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
336 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
337 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
338}
339
340
341// The movn instruction can generate immediates containing an arbitrary 16-bit
342// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
343bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
344 return IsImmMovz(~imm, reg_size);
345}
346
347
348void MacroAssembler::ConditionalCompareMacro(const Register& rn,
349 const Operand& operand,
350 StatusFlags nzcv,
351 Condition cond,
352 ConditionalCompareOp op) {
353 DCHECK((cond != al) && (cond != nv));
354 if (operand.NeedsRelocation(this)) {
355 UseScratchRegisterScope temps(this);
356 Register temp = temps.AcquireX();
357 Ldr(temp, operand.immediate());
358 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
359
360 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
361 (operand.IsImmediate() &&
362 IsImmConditionalCompare(operand.ImmediateValue()))) {
363 // The immediate can be encoded in the instruction, or the operand is an
364 // unshifted register: call the assembler.
365 ConditionalCompare(rn, operand, nzcv, cond, op);
366
367 } else {
368 // The operand isn't directly supported by the instruction: perform the
369 // operation on a temporary register.
370 UseScratchRegisterScope temps(this);
371 Register temp = temps.AcquireSameSizeAs(rn);
372 Mov(temp, operand);
373 ConditionalCompare(rn, temp, nzcv, cond, op);
374 }
375}
376
377
378void MacroAssembler::Csel(const Register& rd,
379 const Register& rn,
380 const Operand& operand,
381 Condition cond) {
382 DCHECK(allow_macro_instructions_);
383 DCHECK(!rd.IsZero());
384 DCHECK((cond != al) && (cond != nv));
385 if (operand.IsImmediate()) {
386 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
387 // register.
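    // As an illustrative sketch, Csel(x0, x1, Operand(1), eq) takes the csinc
    // path below: csinc x0, x1, xzr, eq computes x0 = eq ? x1 : (xzr + 1), so
    // the constant 1 needs no scratch register or extra mov.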
388 int64_t imm = operand.ImmediateValue();
389 Register zr = AppropriateZeroRegFor(rn);
390 if (imm == 0) {
391 csel(rd, rn, zr, cond);
392 } else if (imm == 1) {
393 csinc(rd, rn, zr, cond);
394 } else if (imm == -1) {
395 csinv(rd, rn, zr, cond);
396 } else {
397 UseScratchRegisterScope temps(this);
398 Register temp = temps.AcquireSameSizeAs(rn);
399 Mov(temp, imm);
400 csel(rd, rn, temp, cond);
401 }
402 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
403 // Unshifted register argument.
404 csel(rd, rn, operand.reg(), cond);
405 } else {
406 // All other arguments.
407 UseScratchRegisterScope temps(this);
408 Register temp = temps.AcquireSameSizeAs(rn);
409 Mov(temp, operand);
410 csel(rd, rn, temp, cond);
411 }
412}
413
414
415bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
416 int64_t imm) {
417 unsigned n, imm_s, imm_r;
418 int reg_size = dst.SizeInBits();
419 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
420 // Immediate can be represented in a move zero instruction. Movz can't write
421 // to the stack pointer.
422 movz(dst, imm);
423 return true;
424 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
425 // Immediate can be represented in a move not instruction. Movn can't write
426 // to the stack pointer.
427 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
428 return true;
429 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
430 // Immediate can be represented in a logical orr instruction.
431 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
432 return true;
433 }
434 return false;
435}
436
437
438Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
439 int64_t imm) {
440 int reg_size = dst.SizeInBits();
441
442 // Encode the immediate in a single move instruction, if possible.
443 if (TryOneInstrMoveImmediate(dst, imm)) {
444 // The move was successful; nothing to do here.
445 } else {
446 // Pre-shift the immediate to the least-significant bits of the register.
447 int shift_low = CountTrailingZeros(imm, reg_size);
448 int64_t imm_low = imm >> shift_low;
449
450 // Pre-shift the immediate to the most-significant bits of the register. We
451 // insert set bits in the least-significant bits, as this creates a
452 // different immediate that may be encodable using movn or orr-immediate.
453 // If this new immediate is encodable, the set bits will be eliminated by
454 // the post shift on the following instruction.
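    //
    // As an illustrative example, for imm == 0x0000000123500000 (0x1235 << 20)
    // neither the value itself nor a logical-immediate encoding is available,
    // but shift_low is 20 and (imm >> 20) == 0x1235 is movz-encodable, so the
    // caller receives Operand(dst, LSL, 20) to fold into its own instruction.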
455 int shift_high = CountLeadingZeros(imm, reg_size);
456 int64_t imm_high = (imm << shift_high) | ((1LL << shift_high) - 1);
457
458 if (TryOneInstrMoveImmediate(dst, imm_low)) {
459 // The new immediate has been moved into the destination's low bits:
460 // return a new leftward-shifting operand.
461 return Operand(dst, LSL, shift_low);
462 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
463 // The new immediate has been moved into the destination's high bits:
464 // return a new rightward-shifting operand.
465 return Operand(dst, LSR, shift_high);
466 } else {
467 // Use the generic move operation to set up the immediate.
468 Mov(dst, imm);
469 }
470 }
471 return Operand(dst);
472}
473
474
475void MacroAssembler::AddSubMacro(const Register& rd,
476 const Register& rn,
477 const Operand& operand,
478 FlagsUpdate S,
479 AddSubOp op) {
480 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
481 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
482 // The instruction would be a nop. Avoid generating useless code.
483 return;
484 }
485
486 if (operand.NeedsRelocation(this)) {
487 UseScratchRegisterScope temps(this);
488 Register temp = temps.AcquireX();
489 Ldr(temp, operand.immediate());
490 AddSubMacro(rd, rn, temp, S, op);
491 } else if ((operand.IsImmediate() &&
492 !IsImmAddSub(operand.ImmediateValue())) ||
493 (rn.IsZero() && !operand.IsShiftedRegister()) ||
494 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
495 UseScratchRegisterScope temps(this);
496 Register temp = temps.AcquireSameSizeAs(rn);
497 if (operand.IsImmediate()) {
498 Operand imm_operand =
499 MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
500 AddSub(rd, rn, imm_operand, S, op);
501 } else {
502 Mov(temp, operand);
503 AddSub(rd, rn, temp, S, op);
504 }
505 } else {
506 AddSub(rd, rn, operand, S, op);
507 }
508}
509
510
511void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
512 const Register& rn,
513 const Operand& operand,
514 FlagsUpdate S,
515 AddSubWithCarryOp op) {
516 DCHECK(rd.SizeInBits() == rn.SizeInBits());
517 UseScratchRegisterScope temps(this);
518
519 if (operand.NeedsRelocation(this)) {
520 Register temp = temps.AcquireX();
521 Ldr(temp, operand.immediate());
522 AddSubWithCarryMacro(rd, rn, temp, S, op);
523
524 } else if (operand.IsImmediate() ||
525 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
526 // Add/sub with carry (immediate or ROR shifted register).
527 Register temp = temps.AcquireSameSizeAs(rn);
528 Mov(temp, operand);
529 AddSubWithCarry(rd, rn, temp, S, op);
530
531 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
532 // Add/sub with carry (shifted register).
533 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
534 DCHECK(operand.shift() != ROR);
535 DCHECK(is_uintn(operand.shift_amount(),
536 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
537 : kWRegSizeInBitsLog2));
538 Register temp = temps.AcquireSameSizeAs(rn);
539 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
540 AddSubWithCarry(rd, rn, temp, S, op);
541
542 } else if (operand.IsExtendedRegister()) {
543 // Add/sub with carry (extended register).
544 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
545 // Add/sub extended supports a shift <= 4. We want to support exactly the
546 // same modes.
547 DCHECK(operand.shift_amount() <= 4);
548 DCHECK(operand.reg().Is64Bits() ||
549 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
550 Register temp = temps.AcquireSameSizeAs(rn);
551 EmitExtendShift(temp, operand.reg(), operand.extend(),
552 operand.shift_amount());
553 AddSubWithCarry(rd, rn, temp, S, op);
554
555 } else {
556 // The addressing mode is directly supported by the instruction.
557 AddSubWithCarry(rd, rn, operand, S, op);
558 }
559}
560
561
562void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
563 const MemOperand& addr,
564 LoadStoreOp op) {
565 int64_t offset = addr.offset();
566 LSDataSize size = CalcLSDataSize(op);
567
568 // Check if an immediate offset fits in the immediate field of the
569 // appropriate instruction. If not, emit two instructions to perform
570 // the operation.
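  // As an illustrative sketch, Ldr(x0, MemOperand(x1, 0x12345)) cannot be
  // encoded directly: the scaled unsigned-offset form only reaches multiples
  // of 8 up to 32760 for an X register, and the unscaled form only covers
  // -256 to 255, so the offset is first materialized in a scratch register.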
571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
572 !IsImmLSUnscaled(offset)) {
573 // Immediate offset that can't be encoded using unsigned or unscaled
574 // addressing modes.
575 UseScratchRegisterScope temps(this);
576 Register temp = temps.AcquireSameSizeAs(addr.base());
577 Mov(temp, addr.offset());
578 LoadStore(rt, MemOperand(addr.base(), temp), op);
579 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
580 // Post-index beyond unscaled addressing range.
581 LoadStore(rt, MemOperand(addr.base()), op);
582 add(addr.base(), addr.base(), offset);
583 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
584 // Pre-index beyond unscaled addressing range.
585 add(addr.base(), addr.base(), offset);
586 LoadStore(rt, MemOperand(addr.base()), op);
587 } else {
588 // Encodable in one load/store instruction.
589 LoadStore(rt, addr, op);
590 }
591}
592
593void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
594 const CPURegister& rt2,
595 const MemOperand& addr,
596 LoadStorePairOp op) {
597 // TODO(all): Should we support register offset for load-store-pair?
598 DCHECK(!addr.IsRegisterOffset());
599
600 int64_t offset = addr.offset();
601 LSDataSize size = CalcLSPairDataSize(op);
602
603 // Check if the offset fits in the immediate field of the appropriate
604 // instruction. If not, emit two instructions to perform the operation.
605 if (IsImmLSPair(offset, size)) {
606 // Encodable in one load/store pair instruction.
607 LoadStorePair(rt, rt2, addr, op);
608 } else {
609 Register base = addr.base();
610 if (addr.IsImmediateOffset()) {
611 UseScratchRegisterScope temps(this);
612 Register temp = temps.AcquireSameSizeAs(base);
613 Add(temp, base, offset);
614 LoadStorePair(rt, rt2, MemOperand(temp), op);
615 } else if (addr.IsPostIndex()) {
616 LoadStorePair(rt, rt2, MemOperand(base), op);
617 Add(base, base, offset);
618 } else {
619 DCHECK(addr.IsPreIndex());
620 Add(base, base, offset);
621 LoadStorePair(rt, rt2, MemOperand(base), op);
622 }
623 }
624}
625
626
627void MacroAssembler::Load(const Register& rt,
628 const MemOperand& addr,
629 Representation r) {
630 DCHECK(!r.IsDouble());
631
632 if (r.IsInteger8()) {
633 Ldrsb(rt, addr);
634 } else if (r.IsUInteger8()) {
635 Ldrb(rt, addr);
636 } else if (r.IsInteger16()) {
637 Ldrsh(rt, addr);
638 } else if (r.IsUInteger16()) {
639 Ldrh(rt, addr);
640 } else if (r.IsInteger32()) {
641 Ldr(rt.W(), addr);
642 } else {
643 DCHECK(rt.Is64Bits());
644 Ldr(rt, addr);
645 }
646}
647
648
649void MacroAssembler::Store(const Register& rt,
650 const MemOperand& addr,
651 Representation r) {
652 DCHECK(!r.IsDouble());
653
654 if (r.IsInteger8() || r.IsUInteger8()) {
655 Strb(rt, addr);
656 } else if (r.IsInteger16() || r.IsUInteger16()) {
657 Strh(rt, addr);
658 } else if (r.IsInteger32()) {
659 Str(rt.W(), addr);
660 } else {
661 DCHECK(rt.Is64Bits());
662 if (r.IsHeapObject()) {
663 AssertNotSmi(rt);
664 } else if (r.IsSmi()) {
665 AssertSmi(rt);
666 }
667 Str(rt, addr);
668 }
669}
670
671
672bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
673 Label *label, ImmBranchType b_type) {
674 bool need_longer_range = false;
675 // There are two situations in which we care about the offset being out of
676 // range:
677 // - The label is bound but too far away.
678 // - The label is not bound but linked, and the previous branch
679 // instruction in the chain is too far away.
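  // As an illustrative example, a conditional branch (b.cond) can only encode
  // a +/-1MB offset and a test-and-branch (tbz/tbnz) only +/-32KB, whereas an
  // unconditional b reaches +/-128MB, so an out-of-range target is handled by
  // branching over an unconditional branch with the inverted condition.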
680 if (label->is_bound() || label->is_linked()) {
681 need_longer_range =
682 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
683 }
684 if (!need_longer_range && !label->is_bound()) {
685 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
686 unresolved_branches_.insert(
687 std::pair<int, FarBranchInfo>(max_reachable_pc,
688 FarBranchInfo(pc_offset(), label)));
689 // Also maintain the next pool check.
690 next_veneer_pool_check_ =
691 Min(next_veneer_pool_check_,
692 max_reachable_pc - kVeneerDistanceCheckMargin);
693 }
694 return need_longer_range;
695}
696
697
698void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
699 DCHECK(allow_macro_instructions_);
700 DCHECK(!rd.IsZero());
701
702 if (hint == kAdrNear) {
703 adr(rd, label);
704 return;
705 }
706
707 DCHECK(hint == kAdrFar);
708 if (label->is_bound()) {
709 int label_offset = label->pos() - pc_offset();
710 if (Instruction::IsValidPCRelOffset(label_offset)) {
711 adr(rd, label);
712 } else {
713 DCHECK(label_offset <= 0);
714 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
715 adr(rd, min_adr_offset);
716 Add(rd, rd, label_offset - min_adr_offset);
717 }
718 } else {
719 UseScratchRegisterScope temps(this);
720 Register scratch = temps.AcquireX();
721
722 InstructionAccurateScope scope(
723 this, PatchingAssembler::kAdrFarPatchableNInstrs);
724 adr(rd, label);
725 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
726 nop(ADR_FAR_NOP);
727 }
728 movz(scratch, 0);
729 }
730}
731
732
733void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
734 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
735 (bit == -1 || type >= kBranchTypeFirstUsingBit));
736 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
737 B(static_cast<Condition>(type), label);
738 } else {
739 switch (type) {
740 case always: B(label); break;
741 case never: break;
742 case reg_zero: Cbz(reg, label); break;
743 case reg_not_zero: Cbnz(reg, label); break;
744 case reg_bit_clear: Tbz(reg, bit, label); break;
745 case reg_bit_set: Tbnz(reg, bit, label); break;
746 default:
747 UNREACHABLE();
748 }
749 }
750}
751
752
753void MacroAssembler::B(Label* label, Condition cond) {
754 DCHECK(allow_macro_instructions_);
755 DCHECK((cond != al) && (cond != nv));
756
757 Label done;
758 bool need_extra_instructions =
759 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
760
761 if (need_extra_instructions) {
762 b(&done, NegateCondition(cond));
763 B(label);
764 } else {
765 b(label, cond);
766 }
767 bind(&done);
768}
769
770
771void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
772 DCHECK(allow_macro_instructions_);
773
774 Label done;
775 bool need_extra_instructions =
776 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
777
778 if (need_extra_instructions) {
779 tbz(rt, bit_pos, &done);
780 B(label);
781 } else {
782 tbnz(rt, bit_pos, label);
783 }
784 bind(&done);
785}
786
787
788void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
789 DCHECK(allow_macro_instructions_);
790
791 Label done;
792 bool need_extra_instructions =
793 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
794
795 if (need_extra_instructions) {
796 tbnz(rt, bit_pos, &done);
797 B(label);
798 } else {
799 tbz(rt, bit_pos, label);
800 }
801 bind(&done);
802}
803
804
805void MacroAssembler::Cbnz(const Register& rt, Label* label) {
806 DCHECK(allow_macro_instructions_);
807
808 Label done;
809 bool need_extra_instructions =
810 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
811
812 if (need_extra_instructions) {
813 cbz(rt, &done);
814 B(label);
815 } else {
816 cbnz(rt, label);
817 }
818 bind(&done);
819}
820
821
822void MacroAssembler::Cbz(const Register& rt, Label* label) {
823 DCHECK(allow_macro_instructions_);
824
825 Label done;
826 bool need_extra_instructions =
827 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
828
829 if (need_extra_instructions) {
830 cbnz(rt, &done);
831 B(label);
832 } else {
833 cbz(rt, label);
834 }
835 bind(&done);
836}
837
838
839// Pseudo-instructions.
840
841
842void MacroAssembler::Abs(const Register& rd, const Register& rm,
843 Label* is_not_representable,
844 Label* is_representable) {
845 DCHECK(allow_macro_instructions_);
846 DCHECK(AreSameSizeAndType(rd, rm));
847
848 Cmp(rm, 1);
849 Cneg(rd, rm, lt);
850
851 // If the comparison sets the v flag, the input was the smallest value
852 // representable by rm, and the mathematical result of abs(rm) is not
853 // representable using two's complement.
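  // As an illustrative example, for a 64-bit rm holding INT64_MIN the
  // Cmp(rm, 1) above overflows and sets the V flag, and Cneg leaves rd equal
  // to INT64_MIN, so callers that care must take the is_not_representable
  // branch.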
854 if ((is_not_representable != NULL) && (is_representable != NULL)) {
855 B(is_not_representable, vs);
856 B(is_representable);
857 } else if (is_not_representable != NULL) {
858 B(is_not_representable, vs);
859 } else if (is_representable != NULL) {
860 B(is_representable, vc);
861 }
862}
863
864
865// Abstracted stack operations.
866
867
868void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
869 const CPURegister& src2, const CPURegister& src3) {
870 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
871
872 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
873 int size = src0.SizeInBytes();
874
875 PushPreamble(count, size);
876 PushHelper(count, size, src0, src1, src2, src3);
877}
878
879
880void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
881 const CPURegister& src2, const CPURegister& src3,
882 const CPURegister& src4, const CPURegister& src5,
883 const CPURegister& src6, const CPURegister& src7) {
884 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
885
886 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
887 int size = src0.SizeInBytes();
888
889 PushPreamble(count, size);
890 PushHelper(4, size, src0, src1, src2, src3);
891 PushHelper(count - 4, size, src4, src5, src6, src7);
892}
893
894
895void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
896 const CPURegister& dst2, const CPURegister& dst3) {
897 // It is not valid to pop into the same register more than once in one
898 // instruction, not even into the zero register.
899 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
900 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
901 DCHECK(dst0.IsValid());
902
903 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
904 int size = dst0.SizeInBytes();
905
906 PopHelper(count, size, dst0, dst1, dst2, dst3);
907 PopPostamble(count, size);
908}
909
910
911void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
912 const CPURegister& dst2, const CPURegister& dst3,
913 const CPURegister& dst4, const CPURegister& dst5,
914 const CPURegister& dst6, const CPURegister& dst7) {
915 // It is not valid to pop into the same register more than once in one
916 // instruction, not even into the zero register.
917 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
918 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
919 DCHECK(dst0.IsValid());
920
921 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
922 int size = dst0.SizeInBytes();
923
924 PopHelper(4, size, dst0, dst1, dst2, dst3);
925 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
926 PopPostamble(count, size);
927}
928
929
930void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
931 int size = src0.SizeInBytes() + src1.SizeInBytes();
932
933 PushPreamble(size);
934 // Reserve room for src0 and push src1.
935 str(src1, MemOperand(StackPointer(), -size, PreIndex));
936 // Fill the gap with src0.
937 str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
938}
939
940
941void MacroAssembler::PushPopQueue::PushQueued(
942 PreambleDirective preamble_directive) {
943 if (queued_.empty()) return;
944
945 if (preamble_directive == WITH_PREAMBLE) {
946 masm_->PushPreamble(size_);
947 }
948
949 size_t count = queued_.size();
950 size_t index = 0;
951 while (index < count) {
952 // PushHelper can only handle registers with the same size and type, and it
953 // can handle only four at a time. Batch them up accordingly.
954 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
955 int batch_index = 0;
956 do {
957 batch[batch_index++] = queued_[index++];
958 } while ((batch_index < 4) && (index < count) &&
959 batch[0].IsSameSizeAndType(queued_[index]));
960
961 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
962 batch[0], batch[1], batch[2], batch[3]);
963 }
964
965 queued_.clear();
966}
967
968
969void MacroAssembler::PushPopQueue::PopQueued() {
970 if (queued_.empty()) return;
971
972 size_t count = queued_.size();
973 size_t index = 0;
974 while (index < count) {
975 // PopHelper can only handle registers with the same size and type, and it
976 // can handle only four at a time. Batch them up accordingly.
977 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
978 int batch_index = 0;
979 do {
980 batch[batch_index++] = queued_[index++];
981 } while ((batch_index < 4) && (index < count) &&
982 batch[0].IsSameSizeAndType(queued_[index]));
983
984 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
985 batch[0], batch[1], batch[2], batch[3]);
986 }
987
988 masm_->PopPostamble(size_);
989 queued_.clear();
990}
991
992
993void MacroAssembler::PushCPURegList(CPURegList registers) {
994 int size = registers.RegisterSizeInBytes();
995
996 PushPreamble(registers.Count(), size);
997 // Push up to four registers at a time because if the current stack pointer is
998 // csp and reg_size is 32, registers must be pushed in blocks of four in order
999 // to maintain the 16-byte alignment for csp.
1000 while (!registers.IsEmpty()) {
1001 int count_before = registers.Count();
1002 const CPURegister& src0 = registers.PopHighestIndex();
1003 const CPURegister& src1 = registers.PopHighestIndex();
1004 const CPURegister& src2 = registers.PopHighestIndex();
1005 const CPURegister& src3 = registers.PopHighestIndex();
1006 int count = count_before - registers.Count();
1007 PushHelper(count, size, src0, src1, src2, src3);
1008 }
1009}
1010
1011
1012void MacroAssembler::PopCPURegList(CPURegList registers) {
1013 int size = registers.RegisterSizeInBytes();
1014
1015 // Pop up to four registers at a time because if the current stack pointer is
1016 // csp and reg_size is 32, registers must be pushed in blocks of four in
1017 // order to maintain the 16-byte alignment for csp.
1018 while (!registers.IsEmpty()) {
1019 int count_before = registers.Count();
1020 const CPURegister& dst0 = registers.PopLowestIndex();
1021 const CPURegister& dst1 = registers.PopLowestIndex();
1022 const CPURegister& dst2 = registers.PopLowestIndex();
1023 const CPURegister& dst3 = registers.PopLowestIndex();
1024 int count = count_before - registers.Count();
1025 PopHelper(count, size, dst0, dst1, dst2, dst3);
1026 }
1027 PopPostamble(registers.Count(), size);
1028}
1029
1030
1031void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
1032 int size = src.SizeInBytes();
1033
1034 PushPreamble(count, size);
1035
1036 if (FLAG_optimize_for_size && count > 8) {
1037 UseScratchRegisterScope temps(this);
1038 Register temp = temps.AcquireX();
1039
1040 Label loop;
1041 __ Mov(temp, count / 2);
1042 __ Bind(&loop);
1043 PushHelper(2, size, src, src, NoReg, NoReg);
1044 __ Subs(temp, temp, 1);
1045 __ B(ne, &loop);
1046
1047 count %= 2;
1048 }
1049
1050 // Push up to four registers at a time if possible because if the current
1051 // stack pointer is csp and the register size is 32, registers must be pushed
1052 // in blocks of four in order to maintain the 16-byte alignment for csp.
1053 while (count >= 4) {
1054 PushHelper(4, size, src, src, src, src);
1055 count -= 4;
1056 }
1057 if (count >= 2) {
1058 PushHelper(2, size, src, src, NoReg, NoReg);
1059 count -= 2;
1060 }
1061 if (count == 1) {
1062 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1063 count -= 1;
1064 }
1065 DCHECK(count == 0);
1066}
1067
1068
1069void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1070 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1071
1072 UseScratchRegisterScope temps(this);
1073 Register temp = temps.AcquireSameSizeAs(count);
1074
1075 if (FLAG_optimize_for_size) {
1076 Label loop, done;
1077
1078 Subs(temp, count, 1);
1079 B(mi, &done);
1080
1081 // Push all registers individually, to save code size.
1082 Bind(&loop);
1083 Subs(temp, temp, 1);
1084 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1085 B(pl, &loop);
1086
1087 Bind(&done);
1088 } else {
1089 Label loop, leftover2, leftover1, done;
1090
1091 Subs(temp, count, 4);
1092 B(mi, &leftover2);
1093
1094 // Push groups of four first.
1095 Bind(&loop);
1096 Subs(temp, temp, 4);
1097 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1098 B(pl, &loop);
1099
1100 // Push groups of two.
1101 Bind(&leftover2);
1102 Tbz(count, 1, &leftover1);
1103 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1104
1105 // Push the last one (if required).
1106 Bind(&leftover1);
1107 Tbz(count, 0, &done);
1108 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1109
1110 Bind(&done);
1111 }
1112}
1113
1114
1115void MacroAssembler::PushHelper(int count, int size,
1116 const CPURegister& src0,
1117 const CPURegister& src1,
1118 const CPURegister& src2,
1119 const CPURegister& src3) {
1120 // Ensure that we don't unintentionally modify scratch or debug registers.
1121 InstructionAccurateScope scope(this);
1122
1123 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1124 DCHECK(size == src0.SizeInBytes());
1125
1126 // When pushing multiple registers, the store order is chosen such that
1127 // Push(a, b) is equivalent to Push(a) followed by Push(b).
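  // As an illustrative sketch, a four-register Push(x0, x1, x2, x3) stores x3
  // and x2 with the pre-indexed stp below, then fills the gap with x1 and x0,
  // leaving x0 at the highest address exactly as if it had been pushed first.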
1128 switch (count) {
1129 case 1:
1130 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1131 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1132 break;
1133 case 2:
1134 DCHECK(src2.IsNone() && src3.IsNone());
1135 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1136 break;
1137 case 3:
1138 DCHECK(src3.IsNone());
1139 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1140 str(src0, MemOperand(StackPointer(), 2 * size));
1141 break;
1142 case 4:
1143 // Skip over 4 * size, then fill in the gap. This allows four W registers
1144 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
1145 // at all times.
1146 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1147 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1148 break;
1149 default:
1150 UNREACHABLE();
1151 }
1152}
1153
1154
1155void MacroAssembler::PopHelper(int count, int size,
1156 const CPURegister& dst0,
1157 const CPURegister& dst1,
1158 const CPURegister& dst2,
1159 const CPURegister& dst3) {
1160 // Ensure that we don't unintentionally modify scratch or debug registers.
1161 InstructionAccurateScope scope(this);
1162
1163 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1164 DCHECK(size == dst0.SizeInBytes());
1165
1166 // When popping multiple registers, the load order is chosen such that
1167 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1168 switch (count) {
1169 case 1:
1170 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1171 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1172 break;
1173 case 2:
1174 DCHECK(dst2.IsNone() && dst3.IsNone());
1175 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1176 break;
1177 case 3:
1178 DCHECK(dst3.IsNone());
1179 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1180 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1181 break;
1182 case 4:
1183 // Load the higher addresses first, then load the lower addresses and
1184 // skip the whole block in the second instruction. This allows four W
1185 // registers to be popped using csp, whilst maintaining 16-byte alignment
1186 // for csp at all times.
1187 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1188 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1189 break;
1190 default:
1191 UNREACHABLE();
1192 }
1193}
1194
1195
1196void MacroAssembler::PushPreamble(Operand total_size) {
1197 if (csp.Is(StackPointer())) {
1198 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1199 // on entry and the total size of the specified registers must also be a
1200 // multiple of 16 bytes.
1201 if (total_size.IsImmediate()) {
1202 DCHECK((total_size.ImmediateValue() % 16) == 0);
1203 }
1204
1205 // Don't check access size for non-immediate sizes. It's difficult to do
1206 // well, and it will be caught by hardware (or the simulator) anyway.
1207 } else {
1208 // Even if the current stack pointer is not the system stack pointer (csp),
1209 // the system stack pointer will still be modified in order to comply with
1210 // ABI rules about accessing memory below the system stack pointer.
1211 BumpSystemStackPointer(total_size);
1212 }
1213}
1214
1215
1216void MacroAssembler::PopPostamble(Operand total_size) {
1217 if (csp.Is(StackPointer())) {
1218 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1219 // on entry and the total size of the specified registers must also be a
1220 // multiple of 16 bytes.
1221 if (total_size.IsImmediate()) {
1222 DCHECK((total_size.ImmediateValue() % 16) == 0);
1223 }
1224
1225 // Don't check access size for non-immediate sizes. It's difficult to do
1226 // well, and it will be caught by hardware (or the simulator) anyway.
1227 } else if (emit_debug_code()) {
1228 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1229 // but if we keep it matching StackPointer, the simulator can detect memory
1230 // accesses in the now-free part of the stack.
1231 SyncSystemStackPointer();
1232 }
1233}
1234
1235
1236void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1237 if (offset.IsImmediate()) {
1238 DCHECK(offset.ImmediateValue() >= 0);
1239 } else if (emit_debug_code()) {
1240 Cmp(xzr, offset);
1241 Check(le, kStackAccessBelowStackPointer);
1242 }
1243
1244 Str(src, MemOperand(StackPointer(), offset));
1245}
1246
1247
1248void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1249 if (offset.IsImmediate()) {
1250 DCHECK(offset.ImmediateValue() >= 0);
1251 } else if (emit_debug_code()) {
1252 Cmp(xzr, offset);
1253 Check(le, kStackAccessBelowStackPointer);
1254 }
1255
1256 Ldr(dst, MemOperand(StackPointer(), offset));
1257}
1258
1259
1260void MacroAssembler::PokePair(const CPURegister& src1,
1261 const CPURegister& src2,
1262 int offset) {
1263 DCHECK(AreSameSizeAndType(src1, src2));
1264 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1265 Stp(src1, src2, MemOperand(StackPointer(), offset));
1266}
1267
1268
1269void MacroAssembler::PeekPair(const CPURegister& dst1,
1270 const CPURegister& dst2,
1271 int offset) {
1272 DCHECK(AreSameSizeAndType(dst1, dst2));
1273 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1274 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1275}
1276
1277
1278void MacroAssembler::PushCalleeSavedRegisters() {
1279 // Ensure that the macro-assembler doesn't use any scratch registers.
1280 InstructionAccurateScope scope(this);
1281
1282 // This method must not be called unless the current stack pointer is the
1283 // system stack pointer (csp).
1284 DCHECK(csp.Is(StackPointer()));
1285
1286 MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
1287
1288 stp(d14, d15, tos);
1289 stp(d12, d13, tos);
1290 stp(d10, d11, tos);
1291 stp(d8, d9, tos);
1292
1293 stp(x29, x30, tos);
1294 stp(x27, x28, tos); // x28 = jssp
1295 stp(x25, x26, tos);
1296 stp(x23, x24, tos);
1297 stp(x21, x22, tos);
1298 stp(x19, x20, tos);
1299}
1300
1301
1302void MacroAssembler::PopCalleeSavedRegisters() {
1303 // Ensure that the macro-assembler doesn't use any scratch registers.
1304 InstructionAccurateScope scope(this);
1305
1306 // This method must not be called unless the current stack pointer is the
1307 // system stack pointer (csp).
1308 DCHECK(csp.Is(StackPointer()));
1309
1310 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1311
1312 ldp(x19, x20, tos);
1313 ldp(x21, x22, tos);
1314 ldp(x23, x24, tos);
1315 ldp(x25, x26, tos);
1316 ldp(x27, x28, tos); // x28 = jssp
1317 ldp(x29, x30, tos);
1318
1319 ldp(d8, d9, tos);
1320 ldp(d10, d11, tos);
1321 ldp(d12, d13, tos);
1322 ldp(d14, d15, tos);
1323}
1324
1325
1326void MacroAssembler::AssertStackConsistency() {
1327 // Avoid emitting code when !use_real_abort() since non-real aborts cause too
1328 // much code to be generated.
1329 if (emit_debug_code() && use_real_aborts()) {
1330 if (csp.Is(StackPointer())) {
1331 // Always check the alignment of csp when it is the stack pointer. We
1332 // can't check the alignment of csp without using a scratch register (or
1333 // clobbering the flags), but the processor (or simulator) will abort if
1334 // it is not properly aligned during a load.
1335 ldr(xzr, MemOperand(csp, 0));
1336 }
1337 if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
1338 Label ok;
1339 // Check that csp <= StackPointer(), preserving all registers and NZCV.
1340 sub(StackPointer(), csp, StackPointer());
1341 cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1342 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1343
1344 // Avoid generating AssertStackConsistency checks for the Push in Abort.
1345 { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
1346 // Restore StackPointer().
1347 sub(StackPointer(), csp, StackPointer());
1348 Abort(kTheCurrentStackPointerIsBelowCsp);
1349 }
1350
1351 bind(&ok);
1352 // Restore StackPointer().
1353 sub(StackPointer(), csp, StackPointer());
1354 }
1355 }
1356}
1357
1358void MacroAssembler::AssertCspAligned() {
1359 if (emit_debug_code() && use_real_aborts()) {
1360 // TODO(titzer): use a real assert for alignment check?
1361 UseScratchRegisterScope scope(this);
1362 Register temp = scope.AcquireX();
1363 ldr(temp, MemOperand(csp));
1364 }
1365}
1366
1367void MacroAssembler::AssertFPCRState(Register fpcr) {
1368 if (emit_debug_code()) {
1369 Label unexpected_mode, done;
1370 UseScratchRegisterScope temps(this);
1371 if (fpcr.IsNone()) {
1372 fpcr = temps.AcquireX();
1373 Mrs(fpcr, FPCR);
1374 }
1375
1376 // Settings left to their default values:
1377 // - Assert that flush-to-zero is not set.
1378 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1379 // - Assert that the rounding mode is nearest-with-ties-to-even.
1380 STATIC_ASSERT(FPTieEven == 0);
1381 Tst(fpcr, RMode_mask);
1382 B(eq, &done);
1383
1384 Bind(&unexpected_mode);
1385 Abort(kUnexpectedFPCRMode);
1386
1387 Bind(&done);
1388 }
1389}
1390
1391
1392void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
1393 const FPRegister& src) {
1394 AssertFPCRState();
1395
1396 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
1397 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
1398 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
1399 Fsub(dst, src, fp_zero);
1400}
1401
1402
1403void MacroAssembler::LoadRoot(CPURegister destination,
1404 Heap::RootListIndex index) {
1405 // TODO(jbramley): Most root values are constants, and can be synthesized
1406 // without a load. Refer to the ARM back end for details.
1407 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1408}
1409
1410
1411void MacroAssembler::StoreRoot(Register source,
1412 Heap::RootListIndex index) {
1413 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
1414 Str(source, MemOperand(root, index << kPointerSizeLog2));
1415}
1416
1417
1418void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1419 Register false_root) {
1420 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1421 Ldp(true_root, false_root,
1422 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1423}
1424
1425
1426void MacroAssembler::LoadHeapObject(Register result,
1427 Handle<HeapObject> object) {
1428 AllowDeferredHandleDereference using_raw_address;
1429 if (isolate()->heap()->InNewSpace(*object)) {
1430 Handle<Cell> cell = isolate()->factory()->NewCell(object);
1431 Mov(result, Operand(cell));
1432 Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
1433 } else {
1434 Mov(result, Operand(object));
1435 }
1436}
1437
1438
1439void MacroAssembler::LoadInstanceDescriptors(Register map,
1440 Register descriptors) {
1441 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1442}
1443
1444
1445void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1446 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1447 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1448}
1449
1450
1451void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1452 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1453 Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
1454 And(dst, dst, Map::EnumLengthBits::kMask);
1455}
1456
1457
1458void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1459 EnumLengthUntagged(dst, map);
1460 SmiTag(dst, dst);
1461}
1462
1463
1464void MacroAssembler::LoadAccessor(Register dst, Register holder,
1465 int accessor_index,
1466 AccessorComponent accessor) {
1467 Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
1468 LoadInstanceDescriptors(dst, dst);
1469 Ldr(dst,
1470 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
1471 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
1472 : AccessorPair::kSetterOffset;
1473 Ldr(dst, FieldMemOperand(dst, offset));
1474}
1475
1476
1477void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
1478 Register scratch1, Register scratch2,
1479 Register scratch3, Register scratch4,
1480 Label* call_runtime) {
1481 DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));
1482
1483 Register empty_fixed_array_value = scratch0;
1484 Register current_object = scratch1;
1485 Register null_value = scratch4;
1486
1487 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1488 Label next, start;
1489
1490 Mov(current_object, object);
1491
1492 // Check if the enum length field is properly initialized, indicating that
1493 // there is an enum cache.
1494 Register map = scratch2;
1495 Register enum_length = scratch3;
1496 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1497
1498 EnumLengthUntagged(enum_length, map);
1499 Cmp(enum_length, kInvalidEnumCacheSentinel);
1500 B(eq, call_runtime);
1501
1502 LoadRoot(null_value, Heap::kNullValueRootIndex);
1503 B(&start);
1504
1505 Bind(&next);
1506 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1507
1508 // For all objects but the receiver, check that the cache is empty.
1509 EnumLengthUntagged(enum_length, map);
1510 Cbnz(enum_length, call_runtime);
1511
1512 Bind(&start);
1513
1514 // Check that there are no elements. Register current_object contains the
1515 // current JS object we've reached through the prototype chain.
1516 Label no_elements;
1517 Ldr(current_object, FieldMemOperand(current_object,
1518 JSObject::kElementsOffset));
1519 Cmp(current_object, empty_fixed_array_value);
1520 B(eq, &no_elements);
1521
1522 // Second chance, the object may be using the empty slow element dictionary.
1523 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1524 B(ne, call_runtime);
1525
1526 Bind(&no_elements);
1527 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1528 Cmp(current_object, null_value);
1529 B(ne, &next);
1530}
1531
1532
1533void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1534 Register scratch1,
1535 Register scratch2,
1536 Label* no_memento_found) {
1537 Label map_check;
1538 Label top_check;
1539 ExternalReference new_space_allocation_top_adr =
1540 ExternalReference::new_space_allocation_top_address(isolate());
1541 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
1542 const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
1543
1544 // Bail out if the object is not in new space.
1545 JumpIfNotInNewSpace(receiver, no_memento_found);
1546 Add(scratch1, receiver, kMementoEndOffset);
1547 // If the object is in new space, we need to check whether it is on the same
1548 // page as the current top.
1549 Mov(scratch2, new_space_allocation_top_adr);
1550 Ldr(scratch2, MemOperand(scratch2));
1551 Eor(scratch2, scratch1, scratch2);
1552 Tst(scratch2, ~Page::kPageAlignmentMask);
1553 B(eq, &top_check);
1554 // The object is on a different page than allocation top. Bail out if the
1555 // object sits on the page boundary as no memento can follow and we cannot
1556 // touch the memory following it.
1557 Eor(scratch2, scratch1, receiver);
1558 Tst(scratch2, ~Page::kPageAlignmentMask);
1559 B(ne, no_memento_found);
1560 // Continue with the actual map check.
1561 jmp(&map_check);
1562 // If top is on the same page as the current object, we need to check whether
1563 // we are below top.
1564 bind(&top_check);
1565 Mov(scratch2, new_space_allocation_top_adr);
1566 Ldr(scratch2, MemOperand(scratch2));
1567 Cmp(scratch1, scratch2);
1568 B(gt, no_memento_found);
1569 // Memento map check.
1570 bind(&map_check);
1571 Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
1572 Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
1573}
1574
1575
1576void MacroAssembler::InNewSpace(Register object,
1577 Condition cond,
1578 Label* branch) {
1579 DCHECK(cond == eq || cond == ne);
1580 UseScratchRegisterScope temps(this);
1581 const int mask =
1582 (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
1583 CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
1584}
1585
1586
1587void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1588 if (emit_debug_code()) {
1589 STATIC_ASSERT(kSmiTag == 0);
1590 Tst(object, kSmiTagMask);
1591 Check(eq, reason);
1592 }
1593}
1594
1595
1596void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1597 if (emit_debug_code()) {
1598 STATIC_ASSERT(kSmiTag == 0);
1599 Tst(object, kSmiTagMask);
1600 Check(ne, reason);
1601 }
1602}
1603
1604
1605void MacroAssembler::AssertName(Register object) {
1606 if (emit_debug_code()) {
1607 AssertNotSmi(object, kOperandIsASmiAndNotAName);
1608
1609 UseScratchRegisterScope temps(this);
1610 Register temp = temps.AcquireX();
1611
1612 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1613 CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1614 Check(ls, kOperandIsNotAName);
1615 }
1616}
1617
1618
1619void MacroAssembler::AssertFunction(Register object) {
1620 if (emit_debug_code()) {
1621 AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
1622
1623 UseScratchRegisterScope temps(this);
1624 Register temp = temps.AcquireX();
1625
1626 CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
1627 Check(eq, kOperandIsNotAFunction);
1628 }
1629}
1630
1631
1632void MacroAssembler::AssertBoundFunction(Register object) {
1633 if (emit_debug_code()) {
1634 AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
1635
1636 UseScratchRegisterScope temps(this);
1637 Register temp = temps.AcquireX();
1638
1639 CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1640 Check(eq, kOperandIsNotABoundFunction);
1641 }
1642}
1643
1644void MacroAssembler::AssertGeneratorObject(Register object) {
1645 if (emit_debug_code()) {
1646 AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
1647
1648 UseScratchRegisterScope temps(this);
1649 Register temp = temps.AcquireX();
1650
1651 CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
1652 Check(eq, kOperandIsNotAGeneratorObject);
1653 }
1654}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001655
Ben Murdoch097c5b22016-05-18 11:27:45 +01001656void MacroAssembler::AssertReceiver(Register object) {
1657 if (emit_debug_code()) {
1658 AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
1659
1660 UseScratchRegisterScope temps(this);
1661 Register temp = temps.AcquireX();
1662
1663 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1664 CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
1665 Check(hs, kOperandIsNotAReceiver);
1666 }
1667}
1668
1669
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001670void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1671 Register scratch) {
1672 if (emit_debug_code()) {
1673 Label done_checking;
1674 AssertNotSmi(object);
1675 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1676 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1677 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1678 Assert(eq, kExpectedUndefinedOrCell);
1679 Bind(&done_checking);
1680 }
1681}
1682
1683
1684void MacroAssembler::AssertString(Register object) {
1685 if (emit_debug_code()) {
1686 UseScratchRegisterScope temps(this);
1687 Register temp = temps.AcquireX();
1688 STATIC_ASSERT(kSmiTag == 0);
1689 Tst(object, kSmiTagMask);
1690 Check(ne, kOperandIsASmiAndNotAString);
1691 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1692 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1693 Check(lo, kOperandIsNotAString);
1694 }
1695}
1696
1697
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001698void MacroAssembler::AssertPositiveOrZero(Register value) {
1699 if (emit_debug_code()) {
1700 Label done;
1701 int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1702 Tbz(value, sign_bit, &done);
1703 Abort(kUnexpectedNegativeValue);
1704 Bind(&done);
1705 }
1706}
1707
Ben Murdochda12d292016-06-02 14:46:10 +01001708void MacroAssembler::AssertNotNumber(Register value) {
1709 if (emit_debug_code()) {
1710 STATIC_ASSERT(kSmiTag == 0);
1711 Tst(value, kSmiTagMask);
1712 Check(ne, kOperandIsANumber);
1713 Label done;
1714 JumpIfNotHeapNumber(value, &done);
1715 Abort(kOperandIsANumber);
1716 Bind(&done);
1717 }
1718}
1719
Ben Murdoch097c5b22016-05-18 11:27:45 +01001720void MacroAssembler::AssertNumber(Register value) {
1721 if (emit_debug_code()) {
1722 Label done;
1723 JumpIfSmi(value, &done);
1724 JumpIfHeapNumber(value, &done);
1725 Abort(kOperandIsNotANumber);
1726 Bind(&done);
1727 }
1728}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001729
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001730void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1731 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1732 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1733}
1734
1735
1736void MacroAssembler::TailCallStub(CodeStub* stub) {
1737 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1738}
1739
1740
1741void MacroAssembler::CallRuntime(const Runtime::Function* f,
1742 int num_arguments,
1743 SaveFPRegsMode save_doubles) {
1744 // All arguments must be on the stack before this function is called.
1745 // x0 holds the return value after the call.
1746
1747 // Check that the number of arguments matches what the function expects.
1748 // If f->nargs is -1, the function can accept a variable number of arguments.
1749 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1750
1751 // Place the necessary arguments.
1752 Mov(x0, num_arguments);
1753 Mov(x1, ExternalReference(f, isolate()));
1754
1755 CEntryStub stub(isolate(), 1, save_doubles);
1756 CallStub(&stub);
1757}
1758
1759
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001760void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1761 int num_arguments) {
1762 Mov(x0, num_arguments);
1763 Mov(x1, ext);
1764
1765 CEntryStub stub(isolate(), 1);
1766 CallStub(&stub);
1767}
1768
1769
1770void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1771 Mov(x1, builtin);
1772 CEntryStub stub(isolate(), 1);
1773 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1774}
1775
1776
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001777void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1778 const Runtime::Function* function = Runtime::FunctionForId(fid);
1779 DCHECK_EQ(1, function->result_size);
1780 if (function->nargs >= 0) {
1781 // TODO(1236192): Most runtime routines don't need the number of
1782 // arguments passed in because it is constant. At some point we
1783 // should remove this need and make the runtime routine entry code
1784 // smarter.
1785 Mov(x0, function->nargs);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001786 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001787 JumpToExternalReference(ExternalReference(fid, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001788}
1789
1790
1791void MacroAssembler::InitializeNewString(Register string,
1792 Register length,
1793 Heap::RootListIndex map_index,
1794 Register scratch1,
1795 Register scratch2) {
1796 DCHECK(!AreAliased(string, length, scratch1, scratch2));
1797 LoadRoot(scratch2, map_index);
1798 SmiTag(scratch1, length);
1799 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1800
1801 Mov(scratch2, String::kEmptyHashField);
1802 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1803 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1804}
1805
1806
1807int MacroAssembler::ActivationFrameAlignment() {
1808#if V8_HOST_ARCH_ARM64
1809 // Running on the real platform. Use the alignment as mandated by the local
1810 // environment.
1811 // Note: This will break if we ever start generating snapshots on one ARM
1812 // platform for another ARM platform with a different alignment.
1813 return base::OS::ActivationFrameAlignment();
1814#else // V8_HOST_ARCH_ARM64
1815 // If we are using the simulator then we should always align to the expected
1816 // alignment. As the simulator is used to generate snapshots we do not know
1817 // if the target platform will need alignment, so this is controlled from a
1818 // flag.
1819 return FLAG_sim_stack_alignment;
1820#endif // V8_HOST_ARCH_ARM64
1821}
1822
1823
1824void MacroAssembler::CallCFunction(ExternalReference function,
1825 int num_of_reg_args) {
1826 CallCFunction(function, num_of_reg_args, 0);
1827}
1828
1829
1830void MacroAssembler::CallCFunction(ExternalReference function,
1831 int num_of_reg_args,
1832 int num_of_double_args) {
1833 UseScratchRegisterScope temps(this);
1834 Register temp = temps.AcquireX();
1835 Mov(temp, function);
1836 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1837}
1838
1839
1840void MacroAssembler::CallCFunction(Register function,
1841 int num_of_reg_args,
1842 int num_of_double_args) {
1843 DCHECK(has_frame());
1844 // We can pass 8 integer arguments in registers. If we need to pass more than
1845 // that, we'll need to implement support for passing them on the stack.
1846 DCHECK(num_of_reg_args <= 8);
1847
1848 // If we're passing doubles, we're limited to the following prototypes
1849 // (defined by ExternalReference::Type):
1850 // BUILTIN_COMPARE_CALL: int f(double, double)
1851 // BUILTIN_FP_FP_CALL: double f(double, double)
1852 // BUILTIN_FP_CALL: double f(double)
1853 // BUILTIN_FP_INT_CALL: double f(double, int)
1854 if (num_of_double_args > 0) {
1855 DCHECK(num_of_reg_args <= 1);
1856 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1857 }
1858
1859
1860 // If the stack pointer is not csp, we need to derive an aligned csp from the
1861 // current stack pointer.
1862 const Register old_stack_pointer = StackPointer();
1863 if (!csp.Is(old_stack_pointer)) {
1864 AssertStackConsistency();
1865
1866 int sp_alignment = ActivationFrameAlignment();
1867 // The ABI mandates at least 16-byte alignment.
1868 DCHECK(sp_alignment >= 16);
1869 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1870
1871 // The current stack pointer is a callee saved register, and is preserved
1872 // across the call.
1873 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1874
1875 // Align and synchronize the system stack pointer with jssp.
1876 Bic(csp, old_stack_pointer, sp_alignment - 1);
1877 SetStackPointer(csp);
1878 }
1879
1880 // Call directly. The function called cannot cause a GC, or allow preemption,
1881 // so the return address in the link register stays correct.
1882 Call(function);
1883
1884 if (!csp.Is(old_stack_pointer)) {
1885 if (emit_debug_code()) {
1886 // Because the stack pointer must be aligned on a 16-byte boundary, the
1887 // aligned csp can be up to 12 bytes below the jssp. This is the case
1888 // where we only pushed one W register on top of an aligned jssp.
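      // For example (illustrative values): if a 16-byte aligned jssp of
      // 0x...1000 had a single W register pushed on it, jssp would be
      // 0x...0ffc; the Bic above would then have produced csp == 0x...0ff0,
      // giving temp == csp - jssp == -12.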
1889 UseScratchRegisterScope temps(this);
1890 Register temp = temps.AcquireX();
1891 DCHECK(ActivationFrameAlignment() == 16);
1892 Sub(temp, csp, old_stack_pointer);
1893 // We want temp <= 0 && temp >= -12.
1894 Cmp(temp, 0);
1895 Ccmp(temp, -12, NFlag, le);
1896 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1897 }
1898 SetStackPointer(old_stack_pointer);
1899 }
1900}
1901
1902
1903void MacroAssembler::Jump(Register target) {
1904 Br(target);
1905}
1906
1907
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001908void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
1909 Condition cond) {
1910 if (cond == nv) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001911 UseScratchRegisterScope temps(this);
1912 Register temp = temps.AcquireX();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001913 Label done;
1914 if (cond != al) B(NegateCondition(cond), &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915 Mov(temp, Operand(target, rmode));
1916 Br(temp);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001917 Bind(&done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001918}
1919
1920
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001921void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
1922 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001923 DCHECK(!RelocInfo::IsCodeTarget(rmode));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001924 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001925}
1926
1927
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001928void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1929 Condition cond) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001930 DCHECK(RelocInfo::IsCodeTarget(rmode));
1931 AllowDeferredHandleDereference embedding_raw_address;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001932 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001933}
1934
1935
1936void MacroAssembler::Call(Register target) {
1937 BlockPoolsScope scope(this);
1938#ifdef DEBUG
1939 Label start_call;
1940 Bind(&start_call);
1941#endif
1942
1943 Blr(target);
1944
1945#ifdef DEBUG
1946 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1947#endif
1948}
1949
1950
1951void MacroAssembler::Call(Label* target) {
1952 BlockPoolsScope scope(this);
1953#ifdef DEBUG
1954 Label start_call;
1955 Bind(&start_call);
1956#endif
1957
1958 Bl(target);
1959
1960#ifdef DEBUG
1961 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1962#endif
1963}
1964
1965
1966// MacroAssembler::CallSize is sensitive to changes in this function, as it
1967// needs to know how many instructions are used to branch to the target.
1968void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1969 BlockPoolsScope scope(this);
1970#ifdef DEBUG
1971 Label start_call;
1972 Bind(&start_call);
1973#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001974
1975 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1976 DCHECK(rmode != RelocInfo::NONE32);
1977
1978 UseScratchRegisterScope temps(this);
1979 Register temp = temps.AcquireX();
1980
1981 if (rmode == RelocInfo::NONE64) {
1982 // Addresses are 48 bits so we never need to load the upper 16 bits.
1983 uint64_t imm = reinterpret_cast<uint64_t>(target);
1984    // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
1985 DCHECK(((imm >> 48) & 0xffff) == 0);
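    // For example (illustrative address): a target of 0x0000123456789abc is
    // materialised by the sequence below as
    //   movz temp, #0x9abc
    //   movk temp, #0x5678, lsl #16
    //   movk temp, #0x1234, lsl #32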
1986 movz(temp, (imm >> 0) & 0xffff, 0);
1987 movk(temp, (imm >> 16) & 0xffff, 16);
1988 movk(temp, (imm >> 32) & 0xffff, 32);
1989 } else {
1990 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1991 }
1992 Blr(temp);
1993#ifdef DEBUG
1994 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1995#endif
1996}
1997
1998
1999void MacroAssembler::Call(Handle<Code> code,
2000 RelocInfo::Mode rmode,
2001 TypeFeedbackId ast_id) {
2002#ifdef DEBUG
2003 Label start_call;
2004 Bind(&start_call);
2005#endif
2006
2007 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
2008 SetRecordedAstId(ast_id);
2009 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2010 }
2011
2012 AllowDeferredHandleDereference embedding_raw_address;
2013 Call(reinterpret_cast<Address>(code.location()), rmode);
2014
2015#ifdef DEBUG
2016 // Check the size of the code generated.
2017 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
2018#endif
2019}
2020
2021
2022int MacroAssembler::CallSize(Register target) {
2023 USE(target);
2024 return kInstructionSize;
2025}
2026
2027
2028int MacroAssembler::CallSize(Label* target) {
2029 USE(target);
2030 return kInstructionSize;
2031}
2032
2033
2034int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2035 USE(target);
2036
2037 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2038 DCHECK(rmode != RelocInfo::NONE32);
2039
2040 if (rmode == RelocInfo::NONE64) {
2041 return kCallSizeWithoutRelocation;
2042 } else {
2043 return kCallSizeWithRelocation;
2044 }
2045}
2046
2047
2048int MacroAssembler::CallSize(Handle<Code> code,
2049 RelocInfo::Mode rmode,
2050 TypeFeedbackId ast_id) {
2051 USE(code);
2052 USE(ast_id);
2053
2054 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2055 DCHECK(rmode != RelocInfo::NONE32);
2056
2057 if (rmode == RelocInfo::NONE64) {
2058 return kCallSizeWithoutRelocation;
2059 } else {
2060 return kCallSizeWithRelocation;
2061 }
2062}
2063
2064
2065void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2066 SmiCheckType smi_check_type) {
2067 Label on_not_heap_number;
2068
2069 if (smi_check_type == DO_SMI_CHECK) {
2070 JumpIfSmi(object, &on_not_heap_number);
2071 }
2072
2073 AssertNotSmi(object);
2074
2075 UseScratchRegisterScope temps(this);
2076 Register temp = temps.AcquireX();
2077 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2078 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2079
2080 Bind(&on_not_heap_number);
2081}
2082
2083
2084void MacroAssembler::JumpIfNotHeapNumber(Register object,
2085 Label* on_not_heap_number,
2086 SmiCheckType smi_check_type) {
2087 if (smi_check_type == DO_SMI_CHECK) {
2088 JumpIfSmi(object, on_not_heap_number);
2089 }
2090
2091 AssertNotSmi(object);
2092
2093 UseScratchRegisterScope temps(this);
2094 Register temp = temps.AcquireX();
2095 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2096 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2097}
2098
2099
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002100void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2101 FPRegister value,
2102 FPRegister scratch_d,
2103 Label* on_successful_conversion,
2104 Label* on_failed_conversion) {
2105 // Convert to an int and back again, then compare with the original value.
2106 Fcvtzs(as_int, value);
2107 Scvtf(scratch_d, as_int);
2108 Fcmp(value, scratch_d);
2109
2110 if (on_successful_conversion) {
2111 B(on_successful_conversion, eq);
2112 }
2113 if (on_failed_conversion) {
2114 B(on_failed_conversion, ne);
2115 }
2116}
2117
2118
2119void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2120 UseScratchRegisterScope temps(this);
2121 Register temp = temps.AcquireX();
2122 // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
2123 // cause overflow.
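  // As a concrete illustration: -0.0 has the bit pattern 0x8000000000000000
  // (the most negative int64), so Cmp(temp, 1) computes INT64_MIN - 1, which
  // overflows and sets the V flag; no other bit pattern overflows this
  // subtraction, so V ends up set if and only if the input was -0.0.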
2124 Fmov(temp, input);
2125 Cmp(temp, 1);
2126}
2127
2128
2129void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2130 Label* on_negative_zero) {
2131 TestForMinusZero(input);
2132 B(vs, on_negative_zero);
2133}
2134
2135
2136void MacroAssembler::JumpIfMinusZero(Register input,
2137 Label* on_negative_zero) {
2138 DCHECK(input.Is64Bits());
2139 // Floating point value is in an integer register. Detect -0.0 by subtracting
2140 // 1 (cmp), which will cause overflow.
2141 Cmp(input, 1);
2142 B(vs, on_negative_zero);
2143}
2144
2145
2146void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2147 // Clamp the value to [0..255].
2148 Cmp(input.W(), Operand(input.W(), UXTB));
2149 // If input < input & 0xff, it must be < 0, so saturate to 0.
2150 Csel(output.W(), wzr, input.W(), lt);
2151 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2152 Csel(output.W(), output.W(), 255, le);
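  // Illustration (example values): input == 300 gives 300 & 0xff == 44, so
  // neither 'lt' nor 'le' holds and the result saturates to 255; input == -5
  // gives -5 < 251 == (-5 & 0xff), so 'lt' holds and the result is 0;
  // input == 100 compares equal to itself and passes through unchanged.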
2153}
2154
2155
2156void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2157 ClampInt32ToUint8(in_out, in_out);
2158}
2159
2160
2161void MacroAssembler::ClampDoubleToUint8(Register output,
2162 DoubleRegister input,
2163 DoubleRegister dbl_scratch) {
2164 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2165 // - Inputs lower than 0 (including -infinity) produce 0.
2166 // - Inputs higher than 255 (including +infinity) produce 255.
2167  // PIXEL types also use round-to-nearest rather than
2168  // round-towards-zero.
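  // For example: -3.5 and -infinity clamp to 0, 300.0 and +infinity clamp to
  // 255, and 126.5 rounds to 126 (Fcvtnu rounds to nearest, ties to even).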
2169
2170 // Squash +infinity before the conversion, since Fcvtnu will normally
2171 // convert it to 0.
2172 Fmov(dbl_scratch, 255);
2173 Fmin(dbl_scratch, dbl_scratch, input);
2174
2175 // Convert double to unsigned integer. Values less than zero become zero.
2176 // Values greater than 255 have already been clamped to 255.
2177 Fcvtnu(output, dbl_scratch);
2178}
2179
2180
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002181void MacroAssembler::CopyBytes(Register dst,
2182 Register src,
2183 Register length,
2184 Register scratch,
2185 CopyHint hint) {
2186 UseScratchRegisterScope temps(this);
2187 Register tmp1 = temps.AcquireX();
2188 Register tmp2 = temps.AcquireX();
2189 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2190 DCHECK(!AreAliased(src, dst, csp));
2191
2192 if (emit_debug_code()) {
2193 // Check copy length.
2194 Cmp(length, 0);
2195 Assert(ge, kUnexpectedNegativeValue);
2196
2197 // Check src and dst buffers don't overlap.
2198 Add(scratch, src, length); // Calculate end of src buffer.
2199 Cmp(scratch, dst);
2200 Add(scratch, dst, length); // Calculate end of dst buffer.
2201 Ccmp(scratch, src, ZFlag, gt);
2202 Assert(le, kCopyBuffersOverlap);
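    // (Taken together: if src_end <= dst the 'gt' condition fails and Ccmp
    // forces the Z flag, so 'le' holds; otherwise 'le' requires dst_end <= src.
    // The assert therefore passes exactly when the buffers do not overlap.)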
2203 }
2204
2205 Label short_copy, short_loop, bulk_loop, done;
2206
2207 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
2208 Register bulk_length = scratch;
2209 int pair_size = 2 * kXRegSize;
2210 int pair_mask = pair_size - 1;
2211
2212 Bic(bulk_length, length, pair_mask);
2213 Cbz(bulk_length, &short_copy);
2214 Bind(&bulk_loop);
2215 Sub(bulk_length, bulk_length, pair_size);
2216 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
2217 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
2218 Cbnz(bulk_length, &bulk_loop);
2219
2220 And(length, length, pair_mask);
2221 }
2222
2223 Bind(&short_copy);
2224 Cbz(length, &done);
2225 Bind(&short_loop);
2226 Sub(length, length, 1);
2227 Ldrb(tmp1, MemOperand(src, 1, PostIndex));
2228 Strb(tmp1, MemOperand(dst, 1, PostIndex));
2229 Cbnz(length, &short_loop);
2230
2231
2232 Bind(&done);
2233}
2234
2235
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002236void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2237 Register end_address,
2238 Register filler) {
2239 DCHECK(!current_address.Is(csp));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002240 UseScratchRegisterScope temps(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002241 Register distance_in_words = temps.AcquireX();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002242 Label done;
2243
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002244 // Calculate the distance. If it's <= zero then there's nothing to do.
2245 Subs(distance_in_words, end_address, current_address);
2246 B(le, &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002247
2248 // There's at least one field to fill, so do this unconditionally.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002249 Str(filler, MemOperand(current_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002250
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002251  // If the distance is an odd number of words, advance current_address by
2252  // one word; otherwise the pairs loop below would overwrite the field that
2253  // was stored above.
2254 And(distance_in_words, distance_in_words, kPointerSize);
2255 Add(current_address, current_address, distance_in_words);
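  // For example (illustrative size): a remaining distance of 40 bytes, i.e.
  // five words, has bit 3 set, so current_address advances one word past the
  // field stored above and the Stp loop below fills the remaining four words
  // as two pairs.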
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002256
2257 // Store filler to memory in pairs.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002258 Label loop, entry;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002259 B(&entry);
2260 Bind(&loop);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002261 Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002262 Bind(&entry);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002263 Cmp(current_address, end_address);
2264 B(lo, &loop);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002265
2266 Bind(&done);
2267}
2268
2269
2270void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
2271 Register first, Register second, Register scratch1, Register scratch2,
2272 Label* failure, SmiCheckType smi_check) {
2273 if (smi_check == DO_SMI_CHECK) {
2274 JumpIfEitherSmi(first, second, failure);
2275 } else if (emit_debug_code()) {
2276 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2277 Label not_smi;
2278 JumpIfEitherSmi(first, second, NULL, &not_smi);
2279
2280 // At least one input is a smi, but the flags indicated a smi check wasn't
2281 // needed.
2282 Abort(kUnexpectedSmi);
2283
2284 Bind(&not_smi);
2285 }
2286
2287 // Test that both first and second are sequential one-byte strings.
2288 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2289 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2290 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2291 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2292
2293 JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
2294 scratch2, failure);
2295}
2296
2297
2298void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
2299 Register first, Register second, Register scratch1, Register scratch2,
2300 Label* failure) {
2301 DCHECK(!AreAliased(scratch1, second));
2302 DCHECK(!AreAliased(scratch1, scratch2));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002303 const int kFlatOneByteStringMask =
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002304 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002305 const int kFlatOneByteStringTag =
2306 kStringTag | kOneByteStringTag | kSeqStringTag;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002307 And(scratch1, first, kFlatOneByteStringMask);
2308 And(scratch2, second, kFlatOneByteStringMask);
2309 Cmp(scratch1, kFlatOneByteStringTag);
2310 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2311 B(ne, failure);
2312}
2313
2314
2315void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
2316 Register scratch,
2317 Label* failure) {
2318 const int kFlatOneByteStringMask =
2319 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2320 const int kFlatOneByteStringTag =
2321 kStringTag | kOneByteStringTag | kSeqStringTag;
2322 And(scratch, type, kFlatOneByteStringMask);
2323 Cmp(scratch, kFlatOneByteStringTag);
2324 B(ne, failure);
2325}
2326
2327
2328void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2329 Register first, Register second, Register scratch1, Register scratch2,
2330 Label* failure) {
2331 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2332 const int kFlatOneByteStringMask =
2333 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2334 const int kFlatOneByteStringTag =
2335 kStringTag | kOneByteStringTag | kSeqStringTag;
2336 And(scratch1, first, kFlatOneByteStringMask);
2337 And(scratch2, second, kFlatOneByteStringMask);
2338 Cmp(scratch1, kFlatOneByteStringTag);
2339 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2340 B(ne, failure);
2341}
2342
2343
2344void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2345 Label* not_unique_name) {
2346 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2347 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2348 // continue
2349 // } else {
2350 // goto not_unique_name
2351 // }
2352 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2353 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
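  // (Tst sets Z when the type is an internalized string; Ccmp then either
  // forces Z to stay set, or, when Z was clear, compares type with
  // SYMBOL_TYPE, so the branch below is taken only for non-unique names.)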
2354 B(ne, not_unique_name);
2355}
2356
Ben Murdochda12d292016-06-02 14:46:10 +01002357void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2358 Register caller_args_count_reg,
2359 Register scratch0, Register scratch1) {
2360#if DEBUG
2361 if (callee_args_count.is_reg()) {
2362 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2363 scratch1));
2364 } else {
2365 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2366 }
2367#endif
2368
2369  // Calculate the end of the destination area where we will put the arguments
2370  // after we drop the current frame. We add kPointerSize to count the receiver
2371  // argument, which is not included in the formal parameter count.
2372 Register dst_reg = scratch0;
2373 __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
2374 __ add(dst_reg, dst_reg,
2375 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
2376
2377 Register src_reg = caller_args_count_reg;
2378 // Calculate the end of source area. +kPointerSize is for the receiver.
2379 if (callee_args_count.is_reg()) {
2380 add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
2381 add(src_reg, src_reg, Operand(kPointerSize));
2382 } else {
2383 add(src_reg, jssp,
2384 Operand((callee_args_count.immediate() + 1) * kPointerSize));
2385 }
2386
2387 if (FLAG_debug_code) {
2388 __ Cmp(src_reg, dst_reg);
2389 __ Check(lo, kStackAccessBelowStackPointer);
2390 }
2391
2392 // Restore caller's frame pointer and return address now as they will be
2393 // overwritten by the copying loop.
2394 __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
2395 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2396
2397  // Now copy the callee arguments to the caller frame, going backwards to
2398  // avoid corrupting them (the source and destination areas could overlap).
2399
2400 // Both src_reg and dst_reg are pointing to the word after the one to copy,
2401 // so they must be pre-decremented in the loop.
2402 Register tmp_reg = scratch1;
2403 Label loop, entry;
2404 __ B(&entry);
2405 __ bind(&loop);
2406 __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
2407 __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
2408 __ bind(&entry);
2409 __ Cmp(jssp, src_reg);
2410 __ B(ne, &loop);
2411
2412 // Leave current frame.
2413 __ Mov(jssp, dst_reg);
2414 __ SetStackPointer(jssp);
2415 __ AssertStackConsistency();
2416}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002417
2418void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2419 const ParameterCount& actual,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002420 Label* done,
2421 InvokeFlag flag,
2422 bool* definitely_mismatches,
2423 const CallWrapper& call_wrapper) {
2424 bool definitely_matches = false;
2425 *definitely_mismatches = false;
2426 Label regular_invoke;
2427
2428  // Check whether the expected and actual argument counts match. If not,
2429  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
2430 // x0: actual arguments count.
2431 // x1: function (passed through to callee).
2432 // x2: expected arguments count.
2433
2434 // The code below is made a lot easier because the calling code already sets
2435 // up actual and expected registers according to the contract if values are
2436 // passed in registers.
2437 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2438 DCHECK(expected.is_immediate() || expected.reg().is(x2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002439
2440 if (expected.is_immediate()) {
2441 DCHECK(actual.is_immediate());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002442 Mov(x0, actual.immediate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002443 if (expected.immediate() == actual.immediate()) {
2444 definitely_matches = true;
2445
2446 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002447 if (expected.immediate() ==
2448 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2449 // Don't worry about adapting arguments for builtins that
2450 // don't want that done. Skip adaption code by making it look
2451        // don't want that done. Skip the adaptation code by making it look
2452 // arguments.
2453 definitely_matches = true;
2454 } else {
2455 *definitely_mismatches = true;
2456 // Set up x2 for the argument adaptor.
2457 Mov(x2, expected.immediate());
2458 }
2459 }
2460
2461 } else { // expected is a register.
2462 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2463 : Operand(actual.reg());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002464 Mov(x0, actual_op);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002465 // If actual == expected perform a regular invocation.
2466 Cmp(expected.reg(), actual_op);
2467 B(eq, &regular_invoke);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002468 }
2469
2470 // If the argument counts may mismatch, generate a call to the argument
2471 // adaptor.
2472 if (!definitely_matches) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002473 Handle<Code> adaptor =
2474 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2475 if (flag == CALL_FUNCTION) {
2476 call_wrapper.BeforeCall(CallSize(adaptor));
2477 Call(adaptor);
2478 call_wrapper.AfterCall();
2479 if (!*definitely_mismatches) {
2480 // If the arg counts don't match, no extra code is emitted by
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002481 // MAsm::InvokeFunctionCode and we can just fall through.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002482 B(done);
2483 }
2484 } else {
2485 Jump(adaptor, RelocInfo::CODE_TARGET);
2486 }
2487 }
2488 Bind(&regular_invoke);
2489}
2490
2491
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002492void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
2493 const ParameterCount& expected,
2494 const ParameterCount& actual) {
2495 Label skip_flooding;
Ben Murdoch61f157c2016-09-16 13:49:30 +01002496 ExternalReference last_step_action =
2497 ExternalReference::debug_last_step_action_address(isolate());
2498 STATIC_ASSERT(StepFrame > StepIn);
2499 Mov(x4, Operand(last_step_action));
2500 Ldrsb(x4, MemOperand(x4));
2501 CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002502 {
2503 FrameScope frame(this,
2504 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2505 if (expected.is_reg()) {
2506 SmiTag(expected.reg());
2507 Push(expected.reg());
2508 }
2509 if (actual.is_reg()) {
2510 SmiTag(actual.reg());
2511 Push(actual.reg());
2512 }
2513 if (new_target.is_valid()) {
2514 Push(new_target);
2515 }
2516 Push(fun);
2517 Push(fun);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002518 CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002519 Pop(fun);
2520 if (new_target.is_valid()) {
2521 Pop(new_target);
2522 }
2523 if (actual.is_reg()) {
2524 Pop(actual.reg());
2525 SmiUntag(actual.reg());
2526 }
2527 if (expected.is_reg()) {
2528 Pop(expected.reg());
2529 SmiUntag(expected.reg());
2530 }
2531 }
2532 bind(&skip_flooding);
2533}
2534
2535
2536void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2537 const ParameterCount& expected,
2538 const ParameterCount& actual,
2539 InvokeFlag flag,
2540 const CallWrapper& call_wrapper) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002541 // You can't call a function without a valid frame.
2542 DCHECK(flag == JUMP_FUNCTION || has_frame());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002543 DCHECK(function.is(x1));
2544 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2545
2546 FloodFunctionIfStepping(function, new_target, expected, actual);
2547
2548 // Clear the new.target register if not given.
2549 if (!new_target.is_valid()) {
2550 LoadRoot(x3, Heap::kUndefinedValueRootIndex);
2551 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002552
2553 Label done;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002554 bool definitely_mismatches = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002555 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
2556 call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002557
2558 // If we are certain that actual != expected, then we know InvokePrologue will
2559 // have handled the call through the argument adaptor mechanism.
2560 // The called function expects the call kind in x5.
2561 if (!definitely_mismatches) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002562 // We call indirectly through the code field in the function to
2563 // allow recompilation to take effect without changing any of the
2564 // call sites.
2565 Register code = x4;
2566 Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002567 if (flag == CALL_FUNCTION) {
2568 call_wrapper.BeforeCall(CallSize(code));
2569 Call(code);
2570 call_wrapper.AfterCall();
2571 } else {
2572 DCHECK(flag == JUMP_FUNCTION);
2573 Jump(code);
2574 }
2575 }
2576
2577 // Continue here if InvokePrologue does handle the invocation due to
2578 // mismatched parameter counts.
2579 Bind(&done);
2580}
2581
2582
2583void MacroAssembler::InvokeFunction(Register function,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002584 Register new_target,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002585 const ParameterCount& actual,
2586 InvokeFlag flag,
2587 const CallWrapper& call_wrapper) {
2588 // You can't call a function without a valid frame.
2589 DCHECK(flag == JUMP_FUNCTION || has_frame());
2590
2591 // Contract with called JS functions requires that function is passed in x1.
2592 // (See FullCodeGenerator::Generate().)
2593 DCHECK(function.is(x1));
2594
2595 Register expected_reg = x2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002596
2597 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2598 // The number of arguments is stored as an int32_t, and -1 is a marker
2599 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2600 // extension to correctly handle it.
2601 Ldr(expected_reg, FieldMemOperand(function,
2602 JSFunction::kSharedFunctionInfoOffset));
2603 Ldrsw(expected_reg,
2604 FieldMemOperand(expected_reg,
2605 SharedFunctionInfo::kFormalParameterCountOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002606
2607 ParameterCount expected(expected_reg);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002608 InvokeFunctionCode(function, new_target, expected, actual, flag,
2609 call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002610}
2611
2612
2613void MacroAssembler::InvokeFunction(Register function,
2614 const ParameterCount& expected,
2615 const ParameterCount& actual,
2616 InvokeFlag flag,
2617 const CallWrapper& call_wrapper) {
2618 // You can't call a function without a valid frame.
2619 DCHECK(flag == JUMP_FUNCTION || has_frame());
2620
2621 // Contract with called JS functions requires that function is passed in x1.
2622 // (See FullCodeGenerator::Generate().)
2623 DCHECK(function.Is(x1));
2624
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002625 // Set up the context.
2626 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2627
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002628 InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002629}
2630
2631
2632void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2633 const ParameterCount& expected,
2634 const ParameterCount& actual,
2635 InvokeFlag flag,
2636 const CallWrapper& call_wrapper) {
2637 // Contract with called JS functions requires that function is passed in x1.
2638 // (See FullCodeGenerator::Generate().)
2639 __ LoadObject(x1, function);
2640 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2641}
2642
2643
2644void MacroAssembler::TryConvertDoubleToInt64(Register result,
2645 DoubleRegister double_input,
2646 Label* done) {
2647 // Try to convert with an FPU convert instruction. It's trivial to compute
2648 // the modulo operation on an integer register so we convert to a 64-bit
2649 // integer.
2650 //
2651  // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2652  // when the double is out of range (including infinities). NaNs are
2653  // converted to 0 (as ECMA-262 requires).
2654 Fcvtzs(result.X(), double_input);
2655
2656 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2657 // representable using a double, so if the result is one of those then we know
2658  // that saturation occurred, and we need to handle the conversion manually.
2659 //
2660 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2661 // 1 will cause signed overflow.
2662 Cmp(result.X(), 1);
2663 Ccmp(result.X(), -1, VFlag, vc);
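  // (Cmp sets V only for INT64_MIN - 1. If V is still clear, Ccmp compares
  // result with -1, i.e. computes result + 1, which overflows only for
  // INT64_MAX; otherwise Ccmp forces V to be set. The 'vc' branch below is
  // therefore taken exactly when the result is neither saturated value.)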
2664
2665 B(vc, done);
2666}
2667
2668
2669void MacroAssembler::TruncateDoubleToI(Register result,
2670 DoubleRegister double_input) {
2671 Label done;
2672
2673 // Try to convert the double to an int64. If successful, the bottom 32 bits
2674 // contain our truncated int32 result.
2675 TryConvertDoubleToInt64(result, double_input, &done);
2676
2677 const Register old_stack_pointer = StackPointer();
2678 if (csp.Is(old_stack_pointer)) {
2679 // This currently only happens during compiler-unittest. If it arises
2680    // This currently only happens in compiler unit tests. If it arises
2681 // cope with csp and have an extra parameter indicating which stack pointer
2682 // it should use.
2683 Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
2684 Mov(jssp, csp);
2685 SetStackPointer(jssp);
2686 }
2687
2688  // If we fell through, the inline version didn't succeed, so call the stub.
2689 Push(lr, double_input);
2690
2691 DoubleToIStub stub(isolate(),
2692 jssp,
2693 result,
2694 0,
2695 true, // is_truncating
2696 true); // skip_fastpath
2697 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2698
2699 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2700 Pop(xzr, lr); // xzr to drop the double input on the stack.
2701
2702 if (csp.Is(old_stack_pointer)) {
2703 Mov(csp, jssp);
2704 SetStackPointer(csp);
2705 AssertStackConsistency();
2706 Pop(xzr, jssp);
2707 }
2708
2709 Bind(&done);
2710}
2711
2712
2713void MacroAssembler::TruncateHeapNumberToI(Register result,
2714 Register object) {
2715 Label done;
2716 DCHECK(!result.is(object));
2717 DCHECK(jssp.Is(StackPointer()));
2718
2719 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2720
2721 // Try to convert the double to an int64. If successful, the bottom 32 bits
2722 // contain our truncated int32 result.
2723 TryConvertDoubleToInt64(result, fp_scratch, &done);
2724
2725 // If we fell through then inline version didn't succeed - call stub instead.
2726  // If we fell through, the inline version didn't succeed, so call the stub.
2727 DoubleToIStub stub(isolate(),
2728 object,
2729 result,
2730 HeapNumber::kValueOffset - kHeapObjectTag,
2731 true, // is_truncating
2732 true); // skip_fastpath
2733 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2734 Pop(lr);
2735
2736 Bind(&done);
2737}
2738
Ben Murdochda12d292016-06-02 14:46:10 +01002739void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002740 UseScratchRegisterScope temps(this);
Ben Murdochda12d292016-06-02 14:46:10 +01002741 frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002742 Register temp = temps.AcquireX();
Ben Murdochda12d292016-06-02 14:46:10 +01002743 Mov(temp, Smi::FromInt(type));
2744 Push(lr, fp);
2745 Mov(fp, StackPointer());
2746 Claim(frame_slots);
2747 str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002748}
2749
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002750void MacroAssembler::Prologue(bool code_pre_aging) {
2751 if (code_pre_aging) {
2752 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2753 __ EmitCodeAgeSequence(stub);
2754 } else {
2755 __ EmitFrameSetupForCodeAgePatching();
2756 }
2757}
2758
2759
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002760void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
2761 Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Ben Murdoch61f157c2016-09-16 13:49:30 +01002762 Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
2763 Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002764}
2765
2766
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002767void MacroAssembler::EnterFrame(StackFrame::Type type,
2768 bool load_constant_pool_pointer_reg) {
2769 // Out-of-line constant pool not implemented on arm64.
2770 UNREACHABLE();
2771}
2772
2773
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002774void MacroAssembler::EnterFrame(StackFrame::Type type) {
2775 DCHECK(jssp.Is(StackPointer()));
2776 UseScratchRegisterScope temps(this);
2777 Register type_reg = temps.AcquireX();
2778 Register code_reg = temps.AcquireX();
2779
Ben Murdochda12d292016-06-02 14:46:10 +01002780 if (type == StackFrame::INTERNAL) {
2781 Mov(type_reg, Smi::FromInt(type));
2782 Push(lr, fp);
2783 Push(type_reg);
2784 Mov(code_reg, Operand(CodeObject()));
2785 Push(code_reg);
2786 Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
2787    // jssp[3] : lr
2788    // jssp[2] : fp
2789    // jssp[1] : type
2790    // jssp[0] : [code object]
2791 } else {
2792 Mov(type_reg, Smi::FromInt(type));
2793 Push(lr, fp);
2794 Push(type_reg);
2795 Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
2796 // jssp[2] : lr
2797 // jssp[1] : fp
2798 // jssp[0] : type
2799 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002800}
2801
2802
2803void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2804 DCHECK(jssp.Is(StackPointer()));
2805 // Drop the execution stack down to the frame pointer and restore
2806 // the caller frame pointer and return address.
2807 Mov(jssp, fp);
2808 AssertStackConsistency();
2809 Pop(fp, lr);
2810}
2811
2812
2813void MacroAssembler::ExitFramePreserveFPRegs() {
2814 PushCPURegList(kCallerSavedFP);
2815}
2816
2817
2818void MacroAssembler::ExitFrameRestoreFPRegs() {
2819 // Read the registers from the stack without popping them. The stack pointer
2820 // will be reset as part of the unwinding process.
2821 CPURegList saved_fp_regs = kCallerSavedFP;
2822 DCHECK(saved_fp_regs.Count() % 2 == 0);
2823
2824 int offset = ExitFrameConstants::kLastExitFrameField;
2825 while (!saved_fp_regs.IsEmpty()) {
2826 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2827 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2828 offset -= 2 * kDRegSize;
2829 Ldp(dst1, dst0, MemOperand(fp, offset));
2830 }
2831}
2832
2833
2834void MacroAssembler::EnterExitFrame(bool save_doubles,
2835 const Register& scratch,
2836 int extra_space) {
2837 DCHECK(jssp.Is(StackPointer()));
2838
2839 // Set up the new stack frame.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002840 Push(lr, fp);
2841 Mov(fp, StackPointer());
Ben Murdochda12d292016-06-02 14:46:10 +01002842 Mov(scratch, Smi::FromInt(StackFrame::EXIT));
2843 Push(scratch);
2844 Push(xzr);
2845 Mov(scratch, Operand(CodeObject()));
2846 Push(scratch);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002847 // fp[8]: CallerPC (lr)
2848 // fp -> fp[0]: CallerFP (old fp)
Ben Murdochda12d292016-06-02 14:46:10 +01002849 // fp[-8]: STUB marker
2850 // fp[-16]: Space reserved for SPOffset.
2851 // jssp -> fp[-24]: CodeObject()
2852 STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002853 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2854 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
Ben Murdochda12d292016-06-02 14:46:10 +01002855 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
2856 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002857
2858 // Save the frame pointer and context pointer in the top frame.
2859 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2860 isolate())));
2861 Str(fp, MemOperand(scratch));
2862 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2863 isolate())));
2864 Str(cp, MemOperand(scratch));
2865
Ben Murdochda12d292016-06-02 14:46:10 +01002866 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002867 if (save_doubles) {
2868 ExitFramePreserveFPRegs();
2869 }
2870
2871 // Reserve space for the return address and for user requested memory.
2872 // We do this before aligning to make sure that we end up correctly
2873 // aligned with the minimum of wasted space.
2874 Claim(extra_space + 1, kXRegSize);
2875 // fp[8]: CallerPC (lr)
2876 // fp -> fp[0]: CallerFP (old fp)
Ben Murdochda12d292016-06-02 14:46:10 +01002877 // fp[-8]: STUB marker
2878 // fp[-16]: Space reserved for SPOffset.
2879 // fp[-24]: CodeObject()
2880 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002881 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2882 // jssp -> jssp[0]: Space reserved for the return address.
2883
2884 // Align and synchronize the system stack pointer with jssp.
2885 AlignAndSetCSPForFrame();
2886 DCHECK(csp.Is(StackPointer()));
2887
2888 // fp[8]: CallerPC (lr)
2889 // fp -> fp[0]: CallerFP (old fp)
Ben Murdochda12d292016-06-02 14:46:10 +01002890 // fp[-8]: STUB marker
2891 // fp[-16]: Space reserved for SPOffset.
2892 // fp[-24]: CodeObject()
2893 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002894 // csp[8]: Memory reserved for the caller if extra_space != 0.
2895 // Alignment padding, if necessary.
2896 // csp -> csp[0]: Space reserved for the return address.
2897
2898 // ExitFrame::GetStateForFramePointer expects to find the return address at
2899 // the memory address immediately below the pointer stored in SPOffset.
2900 // It is not safe to derive much else from SPOffset, because the size of the
2901 // padding can vary.
2902 Add(scratch, csp, kXRegSize);
2903 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2904}
2905
2906
2907// Leave the current exit frame.
2908void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2909 const Register& scratch,
2910 bool restore_context) {
2911 DCHECK(csp.Is(StackPointer()));
2912
2913 if (restore_doubles) {
2914 ExitFrameRestoreFPRegs();
2915 }
2916
2917 // Restore the context pointer from the top frame.
2918 if (restore_context) {
2919 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2920 isolate())));
2921 Ldr(cp, MemOperand(scratch));
2922 }
2923
2924 if (emit_debug_code()) {
2925 // Also emit debug code to clear the cp in the top frame.
2926 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2927 isolate())));
2928 Str(xzr, MemOperand(scratch));
2929 }
2930 // Clear the frame pointer from the top frame.
2931 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2932 isolate())));
2933 Str(xzr, MemOperand(scratch));
2934
2935 // Pop the exit frame.
2936 // fp[8]: CallerPC (lr)
2937 // fp -> fp[0]: CallerFP (old fp)
2938 // fp[...]: The rest of the frame.
2939 Mov(jssp, fp);
2940 SetStackPointer(jssp);
2941 AssertStackConsistency();
2942 Pop(fp, lr);
2943}
2944
2945
2946void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2947 Register scratch1, Register scratch2) {
2948 if (FLAG_native_code_counters && counter->Enabled()) {
2949 Mov(scratch1, value);
2950 Mov(scratch2, ExternalReference(counter));
2951 Str(scratch1, MemOperand(scratch2));
2952 }
2953}
2954
2955
2956void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2957 Register scratch1, Register scratch2) {
2958 DCHECK(value != 0);
2959 if (FLAG_native_code_counters && counter->Enabled()) {
2960 Mov(scratch2, ExternalReference(counter));
2961 Ldr(scratch1, MemOperand(scratch2));
2962 Add(scratch1, scratch1, value);
2963 Str(scratch1, MemOperand(scratch2));
2964 }
2965}
2966
2967
2968void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2969 Register scratch1, Register scratch2) {
2970 IncrementCounter(counter, -value, scratch1, scratch2);
2971}
2972
2973
2974void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2975 if (context_chain_length > 0) {
2976 // Move up the chain of contexts to the context containing the slot.
2977 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2978 for (int i = 1; i < context_chain_length; i++) {
2979 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2980 }
2981 } else {
2982 // Slot is in the current function context. Move it into the
2983 // destination register in case we store into it (the write barrier
2984 // cannot be allowed to destroy the context in cp).
2985 Mov(dst, cp);
2986 }
2987}
2988
2989
2990void MacroAssembler::DebugBreak() {
2991 Mov(x0, 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002992 Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002993 CEntryStub ces(isolate(), 1);
2994 DCHECK(AllowThisStubCall(&ces));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002995 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002996}
2997
2998
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002999void MacroAssembler::PushStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003000 DCHECK(jssp.Is(StackPointer()));
3001 // Adjust this code if the asserts don't hold.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003002 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003003 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003004
3005 // For the JSEntry handler, we must preserve the live registers x0-x4.
3006 // (See JSEntryStub::GenerateBody().)
3007
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003008 // Link the current handler as the next handler.
3009 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3010 Ldr(x10, MemOperand(x11));
3011 Push(x10);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003012
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003013 // Set this new handler as the current one.
3014 Str(jssp, MemOperand(x11));
3015}
3016
3017
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003018void MacroAssembler::PopStackHandler() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003019 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3020 Pop(x10);
3021 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3022 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3023 Str(x10, MemOperand(x11));
3024}
3025
3026
3027void MacroAssembler::Allocate(int object_size,
3028 Register result,
3029 Register scratch1,
3030 Register scratch2,
3031 Label* gc_required,
3032 AllocationFlags flags) {
3033 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
Ben Murdochc5610432016-08-08 18:44:38 +01003034 DCHECK((flags & ALLOCATION_FOLDED) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003035 if (!FLAG_inline_new) {
3036 if (emit_debug_code()) {
3037 // Trash the registers to simulate an allocation failure.
3038 // We apply salt to the original zap value to easily spot the values.
3039 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3040 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3041 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3042 }
3043 B(gc_required);
3044 return;
3045 }
3046
3047 UseScratchRegisterScope temps(this);
3048 Register scratch3 = temps.AcquireX();
3049
3050 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3051 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3052
3053 // Make object size into bytes.
3054 if ((flags & SIZE_IN_WORDS) != 0) {
3055 object_size *= kPointerSize;
3056 }
3057 DCHECK(0 == (object_size & kObjectAlignmentMask));
3058
3059 // Check relative positions of allocation top and limit addresses.
3060 // The values must be adjacent in memory to allow the use of LDP.
3061 ExternalReference heap_allocation_top =
3062 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3063 ExternalReference heap_allocation_limit =
3064 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3065 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3066 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3067 DCHECK((limit - top) == kPointerSize);
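  // Keeping them adjacent lets the code below fetch top and limit with a
  // single Ldp, or reach the limit from top_address with a fixed offset.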
3068
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003069 // Set up allocation top address and allocation limit registers.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003070 Register top_address = scratch1;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003071 Register alloc_limit = scratch2;
3072 Register result_end = scratch3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003073 Mov(top_address, Operand(heap_allocation_top));
3074
3075 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003076 // Load allocation top into result and allocation limit into alloc_limit.
3077 Ldp(result, alloc_limit, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003078 } else {
3079 if (emit_debug_code()) {
3080 // Assert that result actually contains top on entry.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003081 Ldr(alloc_limit, MemOperand(top_address));
3082 Cmp(result, alloc_limit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003083 Check(eq, kUnexpectedAllocationTop);
3084 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003085 // Load allocation limit. Result already contains allocation top.
3086 Ldr(alloc_limit, MemOperand(top_address, limit - top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003087 }
3088
3089 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3090 // the same alignment on ARM64.
3091 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3092
3093 // Calculate new top and bail out if new space is exhausted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003094 Adds(result_end, result, object_size);
Ben Murdochc5610432016-08-08 18:44:38 +01003095 Ccmp(result_end, alloc_limit, NoFlag, cc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003096 B(hi, gc_required);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003097
Ben Murdochc5610432016-08-08 18:44:38 +01003098 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3099 // The top pointer is not updated for allocation folding dominators.
3100 Str(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003101 }
Ben Murdochc5610432016-08-08 18:44:38 +01003102
3103 // Tag the object.
3104 ObjectTag(result, result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003105}
3106
3107
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003108void MacroAssembler::Allocate(Register object_size, Register result,
3109 Register result_end, Register scratch,
3110 Label* gc_required, AllocationFlags flags) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003111 if (!FLAG_inline_new) {
3112 if (emit_debug_code()) {
3113 // Trash the registers to simulate an allocation failure.
3114 // We apply salt to the original zap value to easily spot the values.
3115 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003116 Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
3117 Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003118 }
3119 B(gc_required);
3120 return;
3121 }
3122
3123 UseScratchRegisterScope temps(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003124 Register scratch2 = temps.AcquireX();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003126 // |object_size| and |result_end| may overlap, other registers must not.
3127 DCHECK(!AreAliased(object_size, result, scratch, scratch2));
3128 DCHECK(!AreAliased(result_end, result, scratch, scratch2));
3129 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3130 result_end.Is64Bits());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003131
3132 // Check relative positions of allocation top and limit addresses.
3133 // The values must be adjacent in memory to allow the use of LDP.
3134 ExternalReference heap_allocation_top =
3135 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3136 ExternalReference heap_allocation_limit =
3137 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3138 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3139 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3140 DCHECK((limit - top) == kPointerSize);
3141
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003142 // Set up allocation top address and allocation limit registers.
3143 Register top_address = scratch;
3144 Register alloc_limit = scratch2;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003145 Mov(top_address, heap_allocation_top);
3146
3147 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003148 // Load allocation top into result and allocation limit into alloc_limit.
3149 Ldp(result, alloc_limit, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003150 } else {
3151 if (emit_debug_code()) {
3152 // Assert that result actually contains top on entry.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003153 Ldr(alloc_limit, MemOperand(top_address));
3154 Cmp(result, alloc_limit);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003155 Check(eq, kUnexpectedAllocationTop);
3156 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003157 // Load allocation limit. Result already contains allocation top.
3158 Ldr(alloc_limit, MemOperand(top_address, limit - top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003159 }
3160
3161 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3162 // the same alignment on ARM64.
3163 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3164
3165 // Calculate new top and bail out if new space is exhausted.
3166 if ((flags & SIZE_IN_WORDS) != 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003167 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003168 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003169 Adds(result_end, result, object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003170 }
3171
3172 if (emit_debug_code()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003173 Tst(result_end, kObjectAlignmentMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003174 Check(eq, kUnalignedAllocationInNewSpace);
3175 }
3176
Ben Murdochc5610432016-08-08 18:44:38 +01003177 Ccmp(result_end, alloc_limit, NoFlag, cc);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003178 B(hi, gc_required);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003179
Ben Murdochc5610432016-08-08 18:44:38 +01003180 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3181 // The top pointer is not updated for allocation folding dominators.
3182 Str(result_end, MemOperand(top_address));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003183 }
Ben Murdochc5610432016-08-08 18:44:38 +01003184
3185 // Tag the object.
3186 ObjectTag(result, result);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003187}
3188
Ben Murdochc5610432016-08-08 18:44:38 +01003189void MacroAssembler::FastAllocate(int object_size, Register result,
3190 Register scratch1, Register scratch2,
3191 AllocationFlags flags) {
3192 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3193
3194 DCHECK(!AreAliased(result, scratch1, scratch2));
3195 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3196
3197 // Make object size into bytes.
3198 if ((flags & SIZE_IN_WORDS) != 0) {
3199 object_size *= kPointerSize;
3200 }
3201 DCHECK(0 == (object_size & kObjectAlignmentMask));
3202
3203 ExternalReference heap_allocation_top =
3204 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3205
3206 // Set up allocation top address and allocation limit registers.
3207 Register top_address = scratch1;
3208 Register result_end = scratch2;
3209 Mov(top_address, Operand(heap_allocation_top));
3210 Ldr(result, MemOperand(top_address));
3211
3212 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3213 // the same alignment on ARM64.
3214 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3215
3216 // Calculate new top and write it back.
3217 Adds(result_end, result, object_size);
3218 Str(result_end, MemOperand(top_address));
3219
3220 ObjectTag(result, result);
3221}
3222
3223void MacroAssembler::FastAllocate(Register object_size, Register result,
3224 Register result_end, Register scratch,
3225 AllocationFlags flags) {
3226 // |object_size| and |result_end| may overlap, other registers must not.
3227 DCHECK(!AreAliased(object_size, result, scratch));
3228 DCHECK(!AreAliased(result_end, result, scratch));
3229 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3230 result_end.Is64Bits());
3231
3232 ExternalReference heap_allocation_top =
3233 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3234
3235 // Set up allocation top address and allocation limit registers.
3236 Register top_address = scratch;
3237 Mov(top_address, heap_allocation_top);
3238 Ldr(result, MemOperand(top_address));
3239
3240 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3241 // the same alignment on ARM64.
3242 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3243
3244 // Calculate new top and write it back.
3245 if ((flags & SIZE_IN_WORDS) != 0) {
3246 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
3247 } else {
3248 Adds(result_end, result, object_size);
3249 }
3250 Str(result_end, MemOperand(top_address));
3251
3252 if (emit_debug_code()) {
3253 Tst(result_end, kObjectAlignmentMask);
3254 Check(eq, kUnalignedAllocationInNewSpace);
3255 }
3256
3257 ObjectTag(result, result);
3258}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003259
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003260void MacroAssembler::AllocateTwoByteString(Register result,
3261 Register length,
3262 Register scratch1,
3263 Register scratch2,
3264 Register scratch3,
3265 Label* gc_required) {
3266 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3267 // Calculate the number of bytes needed for the characters in the string while
3268 // observing object alignment.
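 // That is, scratch1 = RoundUp(length * 2 + SeqTwoByteString::kHeaderSize,
 // kObjectAlignment): adding kObjectAlignmentMask and then clearing those bits
 // with Bic rounds the byte count up to the required alignment.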
3269 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3270 Add(scratch1, length, length); // Length in bytes, not chars.
3271 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3272 Bic(scratch1, scratch1, kObjectAlignmentMask);
3273
3274 // Allocate two-byte string in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01003275 Allocate(scratch1, result, scratch2, scratch3, gc_required,
3276 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003277
3278 // Set the map, length and hash field.
3279 InitializeNewString(result,
3280 length,
3281 Heap::kStringMapRootIndex,
3282 scratch1,
3283 scratch2);
3284}
3285
3286
3287void MacroAssembler::AllocateOneByteString(Register result, Register length,
3288 Register scratch1, Register scratch2,
3289 Register scratch3,
3290 Label* gc_required) {
3291 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3292 // Calculate the number of bytes needed for the characters in the string while
3293 // observing object alignment.
3294 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3295 STATIC_ASSERT(kCharSize == 1);
3296 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3297 Bic(scratch1, scratch1, kObjectAlignmentMask);
3298
3299 // Allocate one-byte string in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01003300 Allocate(scratch1, result, scratch2, scratch3, gc_required,
3301 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003302
3303 // Set the map, length and hash field.
3304 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
3305 scratch1, scratch2);
3306}
3307
3308
3309void MacroAssembler::AllocateTwoByteConsString(Register result,
3310 Register length,
3311 Register scratch1,
3312 Register scratch2,
3313 Label* gc_required) {
3314 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01003315 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003316
3317 InitializeNewString(result,
3318 length,
3319 Heap::kConsStringMapRootIndex,
3320 scratch1,
3321 scratch2);
3322}
3323
3324
3325void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
3326 Register scratch1,
3327 Register scratch2,
3328 Label* gc_required) {
Ben Murdochc5610432016-08-08 18:44:38 +01003329 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3330 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003331
3332 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
3333 scratch1, scratch2);
3334}
3335
3336
3337void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3338 Register length,
3339 Register scratch1,
3340 Register scratch2,
3341 Label* gc_required) {
3342 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3343 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01003344 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003345
3346 InitializeNewString(result,
3347 length,
3348 Heap::kSlicedStringMapRootIndex,
3349 scratch1,
3350 scratch2);
3351}
3352
3353
3354void MacroAssembler::AllocateOneByteSlicedString(Register result,
3355 Register length,
3356 Register scratch1,
3357 Register scratch2,
3358 Label* gc_required) {
3359 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3360 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
Ben Murdochc5610432016-08-08 18:44:38 +01003361 NO_ALLOCATION_FLAGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003362
3363 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
3364 scratch1, scratch2);
3365}
3366
3367
3368// Allocates a heap number or jumps to the gc_required label if the young space
3369// is full and a scavenge is needed.
3370void MacroAssembler::AllocateHeapNumber(Register result,
3371 Label* gc_required,
3372 Register scratch1,
3373 Register scratch2,
3374 CPURegister value,
3375 CPURegister heap_number_map,
3376 MutableMode mode) {
3377 DCHECK(!value.IsValid() || value.Is64Bits());
3378 UseScratchRegisterScope temps(this);
3379
3380 // Allocate an object in the heap for the heap number and tag it as a heap
3381 // object.
3382 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3383 NO_ALLOCATION_FLAGS);
3384
3385 Heap::RootListIndex map_index = mode == MUTABLE
3386 ? Heap::kMutableHeapNumberMapRootIndex
3387 : Heap::kHeapNumberMapRootIndex;
3388
3389 // Prepare the heap number map.
3390 if (!heap_number_map.IsValid()) {
3391 // If we have a valid value register, use the same type of register to store
3392 // the map so we can use STP to store both in one instruction.
3393 if (value.IsValid() && value.IsFPRegister()) {
3394 heap_number_map = temps.AcquireD();
3395 } else {
3396 heap_number_map = scratch1;
3397 }
3398 LoadRoot(heap_number_map, map_index);
3399 }
3400 if (emit_debug_code()) {
3401 Register map;
3402 if (heap_number_map.IsFPRegister()) {
3403 map = scratch1;
3404 Fmov(map, DoubleRegister(heap_number_map));
3405 } else {
3406 map = Register(heap_number_map);
3407 }
3408 AssertRegisterIsRoot(map, map_index);
3409 }
3410
3411 // Store the heap number map and the value in the allocated object.
3412 if (value.IsSameSizeAndType(heap_number_map)) {
3413 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3414 HeapNumber::kValueOffset);
Ben Murdochc5610432016-08-08 18:44:38 +01003415 Stp(heap_number_map, value,
3416 FieldMemOperand(result, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003417 } else {
Ben Murdochc5610432016-08-08 18:44:38 +01003418 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003419 if (value.IsValid()) {
Ben Murdochc5610432016-08-08 18:44:38 +01003420 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003421 }
3422 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003423}
3424
3425
3426void MacroAssembler::JumpIfObjectType(Register object,
3427 Register map,
3428 Register type_reg,
3429 InstanceType type,
3430 Label* if_cond_pass,
3431 Condition cond) {
3432 CompareObjectType(object, map, type_reg, type);
3433 B(cond, if_cond_pass);
3434}
3435
3436
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003437void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3438 Register value, Register scratch1,
3439 Register scratch2, Label* gc_required) {
3440 DCHECK(!result.is(constructor));
3441 DCHECK(!result.is(scratch1));
3442 DCHECK(!result.is(scratch2));
3443 DCHECK(!result.is(value));
3444
3445 // Allocate JSValue in new space.
Ben Murdochc5610432016-08-08 18:44:38 +01003446 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3447 NO_ALLOCATION_FLAGS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003448
3449 // Initialize the JSValue.
3450 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3451 Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3452 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3453 Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3454 Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3455 Str(value, FieldMemOperand(result, JSValue::kValueOffset));
3456 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3457}
3458
3459
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003460void MacroAssembler::JumpIfNotObjectType(Register object,
3461 Register map,
3462 Register type_reg,
3463 InstanceType type,
3464 Label* if_not_object) {
3465 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3466}
3467
3468
3469// Sets condition flags based on comparison, and returns type in type_reg.
3470void MacroAssembler::CompareObjectType(Register object,
3471 Register map,
3472 Register type_reg,
3473 InstanceType type) {
3474 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3475 CompareInstanceType(map, type_reg, type);
3476}
3477
3478
3479// Sets condition flags based on comparison, and returns type in type_reg.
3480void MacroAssembler::CompareInstanceType(Register map,
3481 Register type_reg,
3482 InstanceType type) {
3483 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3484 Cmp(type_reg, type);
3485}
3486
3487
3488void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3489 UseScratchRegisterScope temps(this);
3490 Register obj_map = temps.AcquireX();
3491 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3492 CompareRoot(obj_map, index);
3493}
3494
3495
3496void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3497 Handle<Map> map) {
3498 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3499 CompareMap(scratch, map);
3500}
3501
3502
3503void MacroAssembler::CompareMap(Register obj_map,
3504 Handle<Map> map) {
3505 Cmp(obj_map, Operand(map));
3506}
3507
3508
3509void MacroAssembler::CheckMap(Register obj,
3510 Register scratch,
3511 Handle<Map> map,
3512 Label* fail,
3513 SmiCheckType smi_check_type) {
3514 if (smi_check_type == DO_SMI_CHECK) {
3515 JumpIfSmi(obj, fail);
3516 }
3517
3518 CompareObjectMap(obj, scratch, map);
3519 B(ne, fail);
3520}
3521
3522
3523void MacroAssembler::CheckMap(Register obj,
3524 Register scratch,
3525 Heap::RootListIndex index,
3526 Label* fail,
3527 SmiCheckType smi_check_type) {
3528 if (smi_check_type == DO_SMI_CHECK) {
3529 JumpIfSmi(obj, fail);
3530 }
3531 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3532 JumpIfNotRoot(scratch, index, fail);
3533}
3534
3535
3536void MacroAssembler::CheckMap(Register obj_map,
3537 Handle<Map> map,
3538 Label* fail,
3539 SmiCheckType smi_check_type) {
3540 if (smi_check_type == DO_SMI_CHECK) {
3541 JumpIfSmi(obj_map, fail);
3542 }
3543
3544 CompareMap(obj_map, map);
3545 B(ne, fail);
3546}
3547
3548
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003549void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3550 Register scratch2, Handle<WeakCell> cell,
3551 Handle<Code> success,
3552 SmiCheckType smi_check_type) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003553 Label fail;
3554 if (smi_check_type == DO_SMI_CHECK) {
3555 JumpIfSmi(obj, &fail);
3556 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003557 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3558 CmpWeakValue(scratch1, cell, scratch2);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003559 B(ne, &fail);
3560 Jump(success, RelocInfo::CODE_TARGET);
3561 Bind(&fail);
3562}
3563
3564
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003565void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3566 Register scratch) {
3567 Mov(scratch, Operand(cell));
3568 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3569 Cmp(value, scratch);
3570}
3571
3572
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003573void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003574 Mov(value, Operand(cell));
3575 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003576}
3577
3578
3579void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3580 Label* miss) {
3581 GetWeakValue(value, cell);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003582 JumpIfSmi(value, miss);
3583}
3584
3585
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003586void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3587 UseScratchRegisterScope temps(this);
3588 Register temp = temps.AcquireX();
3589 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3590 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3591 Tst(temp, mask);
3592}
3593
3594
3595void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3596 // Load the map's "bit field 2".
3597 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3598 // Retrieve elements_kind from bit field 2.
3599 DecodeField<Map::ElementsKindBits>(result);
3600}
3601
3602
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003603void MacroAssembler::GetMapConstructor(Register result, Register map,
3604 Register temp, Register temp2) {
3605 Label done, loop;
3606 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3607 Bind(&loop);
3608 JumpIfSmi(result, &done);
3609 CompareObjectType(result, temp, temp2, MAP_TYPE);
3610 B(ne, &done);
3611 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3612 B(&loop);
3613 Bind(&done);
3614}
3615
3616
3617void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
3618 Register scratch, Label* miss) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003619 DCHECK(!AreAliased(function, result, scratch));
3620
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003621 // Get the prototype or initial map from the function.
3622 Ldr(result,
3623 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3624
3625 // If the prototype or initial map is the hole, don't return it and simply
3626 // miss the cache instead. This will allow us to allocate a prototype object
3627 // on-demand in the runtime system.
3628 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3629
3630 // If the function does not have an initial map, we're done.
3631 Label done;
3632 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3633
3634 // Get the prototype from the initial map.
3635 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3636
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003637 // All done.
3638 Bind(&done);
3639}
3640
3641
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003642void MacroAssembler::PushRoot(Heap::RootListIndex index) {
3643 UseScratchRegisterScope temps(this);
3644 Register temp = temps.AcquireX();
3645 LoadRoot(temp, index);
3646 Push(temp);
3647}
3648
3649
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003650void MacroAssembler::CompareRoot(const Register& obj,
3651 Heap::RootListIndex index) {
3652 UseScratchRegisterScope temps(this);
3653 Register temp = temps.AcquireX();
3654 DCHECK(!AreAliased(obj, temp));
3655 LoadRoot(temp, index);
3656 Cmp(obj, temp);
3657}
3658
3659
3660void MacroAssembler::JumpIfRoot(const Register& obj,
3661 Heap::RootListIndex index,
3662 Label* if_equal) {
3663 CompareRoot(obj, index);
3664 B(eq, if_equal);
3665}
3666
3667
3668void MacroAssembler::JumpIfNotRoot(const Register& obj,
3669 Heap::RootListIndex index,
3670 Label* if_not_equal) {
3671 CompareRoot(obj, index);
3672 B(ne, if_not_equal);
3673}
3674
3675
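// Compares |lhs| with |rhs| and branches to |if_true| when |cond| holds and to
// |if_false| otherwise. Branches whose target is |fall_through| (the next
// emitted code) are omitted.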
3676void MacroAssembler::CompareAndSplit(const Register& lhs,
3677 const Operand& rhs,
3678 Condition cond,
3679 Label* if_true,
3680 Label* if_false,
3681 Label* fall_through) {
3682 if ((if_true == if_false) && (if_false == fall_through)) {
3683 // Fall through.
3684 } else if (if_true == if_false) {
3685 B(if_true);
3686 } else if (if_false == fall_through) {
3687 CompareAndBranch(lhs, rhs, cond, if_true);
3688 } else if (if_true == fall_through) {
3689 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3690 } else {
3691 CompareAndBranch(lhs, rhs, cond, if_true);
3692 B(if_false);
3693 }
3694}
3695
3696
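// Tests |reg| against |bit_pattern| and branches to |if_any_set| when at least
// one tested bit is set, or to |if_all_clear| when none are. Branches whose
// target is |fall_through| are omitted.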
3697void MacroAssembler::TestAndSplit(const Register& reg,
3698 uint64_t bit_pattern,
3699 Label* if_all_clear,
3700 Label* if_any_set,
3701 Label* fall_through) {
3702 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3703 // Fall through.
3704 } else if (if_all_clear == if_any_set) {
3705 B(if_all_clear);
3706 } else if (if_all_clear == fall_through) {
3707 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3708 } else if (if_any_set == fall_through) {
3709 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3710 } else {
3711 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3712 B(if_all_clear);
3713 }
3714}
3715
3716
3717void MacroAssembler::CheckFastElements(Register map,
3718 Register scratch,
3719 Label* fail) {
3720 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3721 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3722 STATIC_ASSERT(FAST_ELEMENTS == 2);
3723 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3724 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3725 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3726 B(hi, fail);
3727}
3728
3729
3730void MacroAssembler::CheckFastObjectElements(Register map,
3731 Register scratch,
3732 Label* fail) {
3733 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3734 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3735 STATIC_ASSERT(FAST_ELEMENTS == 2);
3736 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3737 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3738 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3739 // If cond==ls (a smi elements kind), force hi so we branch to fail; otherwise compare.
3740 Ccmp(scratch,
3741 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3742 B(hi, fail);
3743}
3744
3745
3746// Note: The ARM version of this clobbers elements_reg, but this version does
3747// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3748void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3749 Register key_reg,
3750 Register elements_reg,
3751 Register scratch1,
3752 FPRegister fpscratch1,
3753 Label* fail,
3754 int elements_offset) {
3755 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3756 Label store_num;
3757
3758 // Speculatively convert the smi to a double - all smis can be exactly
3759 // represented as a double.
3760 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3761
3762 // If value_reg is a smi, we're done.
3763 JumpIfSmi(value_reg, &store_num);
3764
3765 // Ensure that the object is a heap number.
3766 JumpIfNotHeapNumber(value_reg, fail);
3767
3768 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3769
3770 // Canonicalize NaNs.
3771 CanonicalizeNaN(fpscratch1);
3772
3773 // Store the result.
3774 Bind(&store_num);
3775 Add(scratch1, elements_reg,
3776 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3777 Str(fpscratch1,
3778 FieldMemOperand(scratch1,
3779 FixedDoubleArray::kHeaderSize - elements_offset));
3780}
3781
3782
3783bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3784 return has_frame_ || !stub->SometimesSetsUpAFrame();
3785}
3786
3787
3788void MacroAssembler::IndexFromHash(Register hash, Register index) {
3789 // If the hash field contains an array index, pick it out. The assert checks
3790 // that the constants for the maximum number of digits for an array index
3791 // cached in the hash field and the number of bits reserved for it do not
3792 // conflict.
3793 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
3794 (1 << String::kArrayIndexValueBits));
3795 DecodeField<String::ArrayIndexValueBits>(index, hash);
3796 SmiTag(index, index);
3797}
3798
3799
3800void MacroAssembler::EmitSeqStringSetCharCheck(
3801 Register string,
3802 Register index,
3803 SeqStringSetCharCheckIndexType index_type,
3804 Register scratch,
3805 uint32_t encoding_mask) {
3806 DCHECK(!AreAliased(string, index, scratch));
3807
3808 if (index_type == kIndexIsSmi) {
3809 AssertSmi(index);
3810 }
3811
3812 // Check that string is an object.
3813 AssertNotSmi(string, kNonObject);
3814
3815 // Check that string has an appropriate map.
3816 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3817 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3818
3819 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3820 Cmp(scratch, encoding_mask);
3821 Check(eq, kUnexpectedStringType);
3822
3823 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3824 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3825 Check(lt, kIndexIsTooLarge);
3826
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003827 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003828 Cmp(index, 0);
3829 Check(ge, kIndexIsNegative);
3830}
3831
3832
3833void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3834 Register scratch1,
3835 Register scratch2,
3836 Label* miss) {
3837 DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
3838 Label same_contexts;
3839
Ben Murdochda12d292016-06-02 14:46:10 +01003840 // Load current lexical context from the active StandardFrame, which
3841 // may require crawling past STUB frames.
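 // A StandardFrame keeps a context (a tagged, non-smi value) in this slot,
 // while stub frames keep a smi frame-type marker there, so finding a smi
 // means we must step up to the caller's frame and try again.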
3842 Label load_context;
3843 Label has_context;
3844 Mov(scratch2, fp);
3845 bind(&load_context);
3846 Ldr(scratch1,
3847 MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
3848 JumpIfNotSmi(scratch1, &has_context);
3849 Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
3850 B(&load_context);
3851 bind(&has_context);
3852
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003853 // In debug mode, make sure the lexical context is set.
3854#ifdef DEBUG
3855 Cmp(scratch1, 0);
3856 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3857#endif
3858
3859 // Load the native context of the current context.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003860 Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003861
3862 // Check the context is a native context.
3863 if (emit_debug_code()) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003864 // Read the first word and compare to the native_context_map.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003865 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
3866 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
3867 Check(eq, kExpectedNativeContext);
3868 }
3869
3870 // Check if both contexts are the same.
3871 Ldr(scratch2, FieldMemOperand(holder_reg,
3872 JSGlobalProxy::kNativeContextOffset));
3873 Cmp(scratch1, scratch2);
3874 B(&same_contexts, eq);
3875
3876 // Check the context is a native context.
3877 if (emit_debug_code()) {
3878 // We're short on scratch registers here, so use holder_reg as a scratch.
3879 Push(holder_reg);
3880 Register scratch3 = holder_reg;
3881
3882 CompareRoot(scratch2, Heap::kNullValueRootIndex);
3883 Check(ne, kExpectedNonNullContext);
3884
3885 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
3886 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
3887 Check(eq, kExpectedNativeContext);
3888 Pop(holder_reg);
3889 }
3890
3891 // Check that the security token in the calling global object is
3892 // compatible with the security token in the receiving global
3893 // object.
3894 int token_offset = Context::kHeaderSize +
3895 Context::SECURITY_TOKEN_INDEX * kPointerSize;
3896
3897 Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
3898 Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
3899 Cmp(scratch1, scratch2);
3900 B(miss, ne);
3901
3902 Bind(&same_contexts);
3903}
3904
3905
3906// Compute the hash code from the untagged key. This must be kept in sync with
3907// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3908// code-stubs-hydrogen.cc.
3909void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3910 DCHECK(!AreAliased(key, scratch));
3911
3912 // Xor original key with a seed.
3913 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3914 Eor(key, key, Operand::UntagSmi(scratch));
3915
3916 // The algorithm uses 32-bit integer values.
3917 key = key.W();
3918 scratch = scratch.W();
3919
3920 // Compute the hash code from the untagged key. This must be kept in sync
3921 // with ComputeIntegerHash in utils.h.
3922 //
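 // For reference, the whole computation in C-like form (an informal sketch of
 // what the instructions below implement; compare with ComputeIntegerHash):
 //
 //   uint32_t hash = key ^ seed;   // seed comes from Heap::kHashSeedRootIndex
 //   hash = ~hash + (hash << 15);
 //   hash = hash ^ (hash >> 12);
 //   hash = hash + (hash << 2);
 //   hash = hash ^ (hash >> 4);
 //   hash = hash * 2057;           // i.e. hash + (hash << 3) + (hash << 11)
 //   hash = hash ^ (hash >> 16);
 //   return hash & 0x3fffffff;     // the final Bic clears the top two bits
 //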
3923 // hash = ~hash + (hash << 15);
3924 Mvn(scratch, key);
3925 Add(key, scratch, Operand(key, LSL, 15));
3926 // hash = hash ^ (hash >> 12);
3927 Eor(key, key, Operand(key, LSR, 12));
3928 // hash = hash + (hash << 2);
3929 Add(key, key, Operand(key, LSL, 2));
3930 // hash = hash ^ (hash >> 4);
3931 Eor(key, key, Operand(key, LSR, 4));
3932 // hash = hash * 2057;
3933 Mov(scratch, Operand(key, LSL, 11));
3934 Add(key, key, Operand(key, LSL, 3));
3935 Add(key, key, scratch);
3936 // hash = hash ^ (hash >> 16);
3937 Eor(key, key, Operand(key, LSR, 16));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003938 Bic(key, key, Operand(0xc0000000u));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003939}
3940
3941
3942void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3943 Register elements,
3944 Register key,
3945 Register result,
3946 Register scratch0,
3947 Register scratch1,
3948 Register scratch2,
3949 Register scratch3) {
3950 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
3951
3952 Label done;
3953
3954 SmiUntag(scratch0, key);
3955 GetNumberHash(scratch0, scratch1);
3956
3957 // Compute the capacity mask.
3958 Ldrsw(scratch1,
3959 UntagSmiFieldMemOperand(elements,
3960 SeededNumberDictionary::kCapacityOffset));
3961 Sub(scratch1, scratch1, 1);
3962
3963 // Generate an unrolled loop that performs a few probes before giving up.
3964 for (int i = 0; i < kNumberDictionaryProbes; i++) {
3965 // Compute the masked index: (hash + i + i * i) & mask.
3966 if (i > 0) {
3967 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
3968 } else {
3969 Mov(scratch2, scratch0);
3970 }
3971 And(scratch2, scratch2, scratch1);
3972
3973 // Scale the index by multiplying by the element size.
3974 DCHECK(SeededNumberDictionary::kEntrySize == 3);
3975 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3976
3977 // Check if the key is identical to the name.
3978 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3979 Ldr(scratch3,
3980 FieldMemOperand(scratch2,
3981 SeededNumberDictionary::kElementsStartOffset));
3982 Cmp(key, scratch3);
3983 if (i != (kNumberDictionaryProbes - 1)) {
3984 B(eq, &done);
3985 } else {
3986 B(ne, miss);
3987 }
3988 }
3989
3990 Bind(&done);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003991 // Check that the value is a field property.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003992 const int kDetailsOffset =
3993 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3994 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003995 DCHECK_EQ(DATA, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003996 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
3997
3998 // Get the value at the masked, scaled index and return.
3999 const int kValueOffset =
4000 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4001 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
4002}
4003
Ben Murdoch097c5b22016-05-18 11:27:45 +01004004void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
4005 Register code_entry,
4006 Register scratch) {
4007 const int offset = JSFunction::kCodeEntryOffset;
4008
4009 // Since a code entry (value) is always in old space, we don't need to update
4010 // the remembered set. If incremental marking is off, there is nothing for us to
4011 // do.
4012 if (!FLAG_incremental_marking) return;
4013
4014 DCHECK(js_function.is(x1));
4015 DCHECK(code_entry.is(x7));
4016 DCHECK(scratch.is(x5));
4017 AssertNotSmi(js_function);
4018
4019 if (emit_debug_code()) {
4020 UseScratchRegisterScope temps(this);
4021 Register temp = temps.AcquireX();
4022 Add(scratch, js_function, offset - kHeapObjectTag);
4023 Ldr(temp, MemOperand(scratch));
4024 Cmp(temp, code_entry);
4025 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4026 }
4027
4028 // First, check if a write barrier is even needed. The tests below
4029 // catch stores of Smis and stores into young gen.
4030 Label done;
4031
4032 CheckPageFlagClear(code_entry, scratch,
4033 MemoryChunk::kPointersToHereAreInterestingMask, &done);
4034 CheckPageFlagClear(js_function, scratch,
4035 MemoryChunk::kPointersFromHereAreInterestingMask, &done);
4036
4037 const Register dst = scratch;
4038 Add(dst, js_function, offset - kHeapObjectTag);
4039
4040 // Save caller-saved registers. Both input registers (x1 and x7) are caller
4041 // saved, so there is no need to push them.
4042 PushCPURegList(kCallerSaved);
4043
4044 int argument_count = 3;
4045
4046 Mov(x0, js_function);
4047 Mov(x1, dst);
4048 Mov(x2, ExternalReference::isolate_address(isolate()));
4049
4050 {
4051 AllowExternalCallThatCantCauseGC scope(this);
4052 CallCFunction(
4053 ExternalReference::incremental_marking_record_write_code_entry_function(
4054 isolate()),
4055 argument_count);
4056 }
4057
4058 // Restore caller-saved registers.
4059 PopCPURegList(kCallerSaved);
4060
4061 Bind(&done);
4062}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004063
4064void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
4065 Register address,
4066 Register scratch1,
4067 SaveFPRegsMode fp_mode,
4068 RememberedSetFinalAction and_then) {
4069 DCHECK(!AreAliased(object, address, scratch1));
4070 Label done, store_buffer_overflow;
4071 if (emit_debug_code()) {
4072 Label ok;
4073 JumpIfNotInNewSpace(object, &ok);
4074 Abort(kRememberedSetPointerInNewSpace);
4075 bind(&ok);
4076 }
4077 UseScratchRegisterScope temps(this);
4078 Register scratch2 = temps.AcquireX();
4079
4080 // Load store buffer top.
4081 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
4082 Ldr(scratch1, MemOperand(scratch2));
4083 // Store pointer to buffer and increment buffer top.
4084 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
4085 // Write back new top of buffer.
4086 Str(scratch1, MemOperand(scratch2));
4087 // Check whether we have reached the end of the store buffer; if we have,
4088 // the overflow stub is called below.
Ben Murdochda12d292016-06-02 14:46:10 +01004089 Tst(scratch1, StoreBuffer::kStoreBufferMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004090 if (and_then == kFallThroughAtEnd) {
Ben Murdochda12d292016-06-02 14:46:10 +01004091 B(ne, &done);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004092 } else {
4093 DCHECK(and_then == kReturnAtEnd);
Ben Murdochda12d292016-06-02 14:46:10 +01004094 B(eq, &store_buffer_overflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004095 Ret();
4096 }
4097
4098 Bind(&store_buffer_overflow);
4099 Push(lr);
4100 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
4101 CallStub(&store_buffer_overflow_stub);
4102 Pop(lr);
4103
4104 Bind(&done);
4105 if (and_then == kReturnAtEnd) {
4106 Ret();
4107 }
4108}
4109
4110
4111void MacroAssembler::PopSafepointRegisters() {
4112 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4113 PopXRegList(kSafepointSavedRegisters);
4114 Drop(num_unsaved);
4115}
4116
4117
4118void MacroAssembler::PushSafepointRegisters() {
4119 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
4120 // adjust the stack for unsaved registers.
4121 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4122 DCHECK(num_unsaved >= 0);
4123 Claim(num_unsaved);
4124 PushXRegList(kSafepointSavedRegisters);
4125}
4126
4127
4128void MacroAssembler::PushSafepointRegistersAndDoubles() {
4129 PushSafepointRegisters();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004130 PushCPURegList(CPURegList(
4131 CPURegister::kFPRegister, kDRegSizeInBits,
Ben Murdoch61f157c2016-09-16 13:49:30 +01004132 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004133}
4134
4135
4136void MacroAssembler::PopSafepointRegistersAndDoubles() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004137 PopCPURegList(CPURegList(
4138 CPURegister::kFPRegister, kDRegSizeInBits,
Ben Murdoch61f157c2016-09-16 13:49:30 +01004139 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004140 PopSafepointRegisters();
4141}
4142
4143
4144int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4145 // Make sure the safepoint registers list is what we expect.
4146 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4147
4148 // Safepoint registers are stored contiguously on the stack, but not all the
4149 // registers are saved. The following registers are excluded:
4150 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
4151 // the macro assembler.
4152 // - x28 (jssp) because JS stack pointer doesn't need to be included in
4153 // safepoint registers.
4154 // - x31 (csp) because the system stack pointer doesn't need to be included
4155 // in safepoint registers.
4156 //
4157 // This function implements the mapping of register code to index into the
4158 // safepoint register slots.
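 // For example: x0-x15 keep their own indices (slots 0-15), x18-x27 map to
 // slots 16-25, and x29/x30 (fp and lr) map to slots 26 and 27.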
4159 if ((reg_code >= 0) && (reg_code <= 15)) {
4160 return reg_code;
4161 } else if ((reg_code >= 18) && (reg_code <= 27)) {
4162 // Skip ip0 and ip1.
4163 return reg_code - 2;
4164 } else if ((reg_code == 29) || (reg_code == 30)) {
4165 // Also skip jssp.
4166 return reg_code - 3;
4167 } else {
4168 // This register has no safepoint register slot.
4169 UNREACHABLE();
4170 return -1;
4171 }
4172}
4173
Ben Murdoch097c5b22016-05-18 11:27:45 +01004174void MacroAssembler::CheckPageFlag(const Register& object,
4175 const Register& scratch, int mask,
4176 Condition cc, Label* condition_met) {
4177 And(scratch, object, ~Page::kPageAlignmentMask);
4178 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4179 if (cc == eq) {
4180 TestAndBranchIfAnySet(scratch, mask, condition_met);
4181 } else {
4182 TestAndBranchIfAllClear(scratch, mask, condition_met);
4183 }
4184}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004185
4186void MacroAssembler::CheckPageFlagSet(const Register& object,
4187 const Register& scratch,
4188 int mask,
4189 Label* if_any_set) {
4190 And(scratch, object, ~Page::kPageAlignmentMask);
4191 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4192 TestAndBranchIfAnySet(scratch, mask, if_any_set);
4193}
4194
4195
4196void MacroAssembler::CheckPageFlagClear(const Register& object,
4197 const Register& scratch,
4198 int mask,
4199 Label* if_all_clear) {
4200 And(scratch, object, ~Page::kPageAlignmentMask);
4201 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4202 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
4203}
4204
4205
4206void MacroAssembler::RecordWriteField(
4207 Register object,
4208 int offset,
4209 Register value,
4210 Register scratch,
4211 LinkRegisterStatus lr_status,
4212 SaveFPRegsMode save_fp,
4213 RememberedSetAction remembered_set_action,
4214 SmiCheck smi_check,
4215 PointersToHereCheck pointers_to_here_check_for_value) {
4216 // First, check if a write barrier is even needed. The tests below
4217 // catch stores of Smis.
4218 Label done;
4219
4220 // Skip the barrier if writing a smi.
4221 if (smi_check == INLINE_SMI_CHECK) {
4222 JumpIfSmi(value, &done);
4223 }
4224
4225 // Although the object register is tagged, the offset is relative to the start
4226 // of the object, so offset must be a multiple of kPointerSize.
4227 DCHECK(IsAligned(offset, kPointerSize));
4228
4229 Add(scratch, object, offset - kHeapObjectTag);
4230 if (emit_debug_code()) {
4231 Label ok;
4232 Tst(scratch, (1 << kPointerSizeLog2) - 1);
4233 B(eq, &ok);
4234 Abort(kUnalignedCellInWriteBarrier);
4235 Bind(&ok);
4236 }
4237
4238 RecordWrite(object,
4239 scratch,
4240 value,
4241 lr_status,
4242 save_fp,
4243 remembered_set_action,
4244 OMIT_SMI_CHECK,
4245 pointers_to_here_check_for_value);
4246
4247 Bind(&done);
4248
4249 // Clobber clobbered input registers when running with the debug-code flag
4250 // turned on to provoke errors.
4251 if (emit_debug_code()) {
4252 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
4253 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
4254 }
4255}
4256
4257
4258// Will clobber: object, map, dst.
4259// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4260void MacroAssembler::RecordWriteForMap(Register object,
4261 Register map,
4262 Register dst,
4263 LinkRegisterStatus lr_status,
4264 SaveFPRegsMode fp_mode) {
4265 ASM_LOCATION("MacroAssembler::RecordWrite");
4266 DCHECK(!AreAliased(object, map));
4267
4268 if (emit_debug_code()) {
4269 UseScratchRegisterScope temps(this);
4270 Register temp = temps.AcquireX();
4271
4272 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
4273 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4274 }
4275
4276 if (!FLAG_incremental_marking) {
4277 return;
4278 }
4279
4280 if (emit_debug_code()) {
4281 UseScratchRegisterScope temps(this);
4282 Register temp = temps.AcquireX();
4283
4284 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4285 Cmp(temp, map);
4286 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4287 }
4288
4289 // First, check if a write barrier is even needed. The tests below
4290 // catch stores of smis and stores into the young generation.
4291 Label done;
4292
4293 // A single check of the map's pages interesting flag suffices, since it is
4294 // only set during incremental collection, and then it's also guaranteed that
4295 // the from object's page's interesting flag is also set. This optimization
4296 // relies on the fact that maps can never be in new space.
4297 CheckPageFlagClear(map,
4298 map, // Used as scratch.
4299 MemoryChunk::kPointersToHereAreInterestingMask,
4300 &done);
4301
4302 // Record the actual write.
4303 if (lr_status == kLRHasNotBeenSaved) {
4304 Push(lr);
4305 }
4306 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
4307 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
4308 fp_mode);
4309 CallStub(&stub);
4310 if (lr_status == kLRHasNotBeenSaved) {
4311 Pop(lr);
4312 }
4313
4314 Bind(&done);
4315
4316 // Count number of write barriers in generated code.
4317 isolate()->counters()->write_barriers_static()->Increment();
4318 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
4319 dst);
4320
4321 // Clobber clobbered registers when running with the debug-code flag
4322 // turned on to provoke errors.
4323 if (emit_debug_code()) {
4324 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
4325 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
4326 }
4327}
4328
4329
4330// Will clobber: object, address, value.
4331// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4332//
4333// The register 'object' contains a heap object pointer. The heap object tag is
4334// shifted away.
4335void MacroAssembler::RecordWrite(
4336 Register object,
4337 Register address,
4338 Register value,
4339 LinkRegisterStatus lr_status,
4340 SaveFPRegsMode fp_mode,
4341 RememberedSetAction remembered_set_action,
4342 SmiCheck smi_check,
4343 PointersToHereCheck pointers_to_here_check_for_value) {
4344 ASM_LOCATION("MacroAssembler::RecordWrite");
4345 DCHECK(!AreAliased(object, value));
4346
4347 if (emit_debug_code()) {
4348 UseScratchRegisterScope temps(this);
4349 Register temp = temps.AcquireX();
4350
4351 Ldr(temp, MemOperand(address));
4352 Cmp(temp, value);
4353 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4354 }
4355
4356 // First, check if a write barrier is even needed. The tests below
4357 // catch stores of smis and stores into the young generation.
4358 Label done;
4359
4360 if (smi_check == INLINE_SMI_CHECK) {
4361 DCHECK_EQ(0, kSmiTag);
4362 JumpIfSmi(value, &done);
4363 }
4364
4365 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
4366 CheckPageFlagClear(value,
4367 value, // Used as scratch.
4368 MemoryChunk::kPointersToHereAreInterestingMask,
4369 &done);
4370 }
4371 CheckPageFlagClear(object,
4372 value, // Used as scratch.
4373 MemoryChunk::kPointersFromHereAreInterestingMask,
4374 &done);
4375
4376 // Record the actual write.
4377 if (lr_status == kLRHasNotBeenSaved) {
4378 Push(lr);
4379 }
4380 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
4381 fp_mode);
4382 CallStub(&stub);
4383 if (lr_status == kLRHasNotBeenSaved) {
4384 Pop(lr);
4385 }
4386
4387 Bind(&done);
4388
4389 // Count number of write barriers in generated code.
4390 isolate()->counters()->write_barriers_static()->Increment();
4391 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
4392 value);
4393
4394 // Clobber clobbered registers when running with the debug-code flag
4395 // turned on to provoke errors.
4396 if (emit_debug_code()) {
4397 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
4398 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
4399 }
4400}
4401
4402
4403void MacroAssembler::AssertHasValidColor(const Register& reg) {
4404 if (emit_debug_code()) {
4405 // The bit sequence is backward. The first character in the string
4406 // represents the least significant bit.
4407 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4408
4409 Label color_is_valid;
4410 Tbnz(reg, 0, &color_is_valid);
4411 Tbz(reg, 1, &color_is_valid);
4412 Abort(kUnexpectedColorFound);
4413 Bind(&color_is_valid);
4414 }
4415}
4416
4417
4418void MacroAssembler::GetMarkBits(Register addr_reg,
4419 Register bitmap_reg,
4420 Register shift_reg) {
4421 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4422 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4423 // addr_reg is divided into fields:
4424 // |63 page base 20|19 high 8|7 shift 3|2 0|
4425 // 'high' gives the index of the cell holding color bits for the object.
4426 // 'shift' gives the offset in the cell for this object's color.
4427 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
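 // With 64-bit pointers (kPointerSizeLog2 == 3) and 32-bit bitmap cells
 // (kBitsPerCellLog2 == 5) this is 8, which matches the 'high' field above.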
4428 UseScratchRegisterScope temps(this);
4429 Register temp = temps.AcquireX();
4430 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4431 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4432 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4433 // bitmap_reg:
4434 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4435 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4436}
4437
4438
4439void MacroAssembler::HasColor(Register object,
4440 Register bitmap_scratch,
4441 Register shift_scratch,
4442 Label* has_color,
4443 int first_bit,
4444 int second_bit) {
4445 // See mark-compact.h for color definitions.
4446 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
4447
4448 GetMarkBits(object, bitmap_scratch, shift_scratch);
4449 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4450 // Shift the bitmap down to get the color of the object in bits [1:0].
4451 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4452
4453 AssertHasValidColor(bitmap_scratch);
4454
4455 // These bit sequences are backwards. The first character in the string
4456 // represents the least significant bit.
4457 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004458 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4459 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004460
4461 // Check for the color.
4462 if (first_bit == 0) {
4463 // Checking for white.
4464 DCHECK(second_bit == 0);
4465 // We only need to test the first bit.
4466 Tbz(bitmap_scratch, 0, has_color);
4467 } else {
4468 Label other_color;
4469 // Checking for grey or black.
4470 Tbz(bitmap_scratch, 0, &other_color);
4471 if (second_bit == 0) {
4472 Tbz(bitmap_scratch, 1, has_color);
4473 } else {
4474 Tbnz(bitmap_scratch, 1, has_color);
4475 }
4476 Bind(&other_color);
4477 }
4478
4479 // Fall through if it does not have the right color.
4480}
4481
4482
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004483void MacroAssembler::JumpIfBlack(Register object,
4484 Register scratch0,
4485 Register scratch1,
4486 Label* on_black) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004487 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4488 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004489}
4490
4491
4492void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4493 Register object,
4494 Register scratch0,
4495 Register scratch1,
4496 Label* found) {
4497 DCHECK(!AreAliased(object, scratch0, scratch1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004498 Register current = scratch0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004499 Label loop_again, end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004500
4501 // Start the walk at the prototype of the object itself.
4502 Mov(current, object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004503 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4504 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4505 CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004506
4507 // Loop based on the map going up the prototype chain.
4508 Bind(&loop_again);
4509 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004510 STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
4511 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
4512 CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
4513 B(lo, found);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004514 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4515 DecodeField<Map::ElementsKindBits>(scratch1);
4516 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4517 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004518 CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
4519
4520 Bind(&end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004521}
4522
4523
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004524void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
4525 Register shift_scratch, Register load_scratch,
4526 Register length_scratch,
4527 Label* value_is_white) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004528 DCHECK(!AreAliased(
4529 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4530
4531 // These bit sequences are backwards. The first character in the string
4532 // represents the least significant bit.
4533 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004534 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4535 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004536
4537 GetMarkBits(value, bitmap_scratch, shift_scratch);
4538 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4539 Lsr(load_scratch, load_scratch, shift_scratch);
4540
4541 AssertHasValidColor(load_scratch);
4542
4543 // If the value is black or grey we don't need to do anything.
4544 // Since both black and grey have a 1 in the first position and white does
4545 // not have a 1 there we only need to check one bit.
  Tbz(load_scratch, 0, value_is_white);
}


void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}


void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}


void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we
  // have some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort);
    } else {
      CallRuntime(Runtime::kAbort);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, NativeContextMemOperand());
  Ldr(scratch2,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  Ldr(map_in_out,
      ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}


void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  Ldr(dst, NativeContextMemOperand());
  Ldr(dst, ContextMemOperand(dst, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}


// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // The provided arguments, and their proper procedure-call standard
  // registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);  // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedFP;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
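  //
  // As a purely illustrative example: if the first argument is w2, it needs to
  // end up in w1 (the first varargs slot, since x0 carries the format string).
  // Because x2 is itself a PCS varargs register, w2 is first copied to a
  // scratch register in the pass below, and the second pass then moves the
  // value into w1.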
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsFPRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(arg_count, pcs);
}


void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
  // A call to printf needs special handling for the simulator, since the
  // system printf function will use a different instruction set and the
  // procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
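    // For example (purely illustrative): Printf("%d %f", w1, d0) reaches this
    // point with arg_count == 2, and the pattern word emitted above is
    // (kPrintfArgW << 0) | (kPrintfArgD << kPrintfArgPatternBits), which the
    // simulator decodes to print a 32-bit integer followed by a double.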
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!csp.Is(StackPointer())) {
    DCHECK(!csp.Aliases(arg0));
    DCHECK(!csp.Aliases(arg1));
    DCHECK(!csp.Aliases(arg2));
    DCHECK(!csp.Aliases(arg3));
  }

  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  { UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
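      // xzr is pushed alongside tmp so that the push remains a multiple of
      // 16 bytes; csp must stay 16-byte aligned whenever it is the stack
      // pointer.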
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    { UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this,
                                 kNoCodeAgeSequenceLength / kInstructionSize);
  DCHECK(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
                                         Code * stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age
  // sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
  }
}


bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(is_young ||
         isolate->code_aging_helper()->IsOld(sequence));
  return is_young;
}


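// Computes the quotient of dividend / divisor, rounded towards zero, without a
// division instruction: the quotient comes from the high half of a multiply by
// a magic constant (base::SignedDivisionByConstant), followed by a shift and a
// sign correction. As a purely illustrative example, for divisor == 3 the
// helper typically yields multiplier 0x55555556 and shift 0, so a dividend of
// 7 gives (7 * 0x55555556) >> 32 == 2, and -7 gives -3 plus the sign
// correction (dividend >>> 31 == 1), i.e. -2.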
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  DCHECK(!AreAliased(result, dividend));
  DCHECK(result.Is32Bits() && dividend.Is32Bits());
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
  Mov(result, mag.multiplier);
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) Add(result, result, dividend);
  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
  if (mag.shift > 0) Asr(result, result, mag.shift);
  Add(result, result, Operand(dividend, LSR, 31));
}


#undef __


UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, csp));
  return result;
}


CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                   const CPURegister& reg) {
  DCHECK(available->IncludesAliasOf(reg));
  available->Remove(reg);
  return reg;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
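    //
    // For example (illustrative only, assuming the layout described above): a
    // smi check on x3 whose patch site sits two instructions back (delta == 2)
    // would be encoded as 3 | (2 << 5), i.e. 0x43.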
    uint32_t delta =
        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      uint32_t payload32 = static_cast<uint32_t>(payload);
      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
      int smi_check_delta = DeltaBits::decode(payload32);
      DCHECK(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64