// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS

#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures implementation.

void CpuFeatures::ProbeImpl(bool cross_compile) {
  // AArch64 has no configuration options; no further probing is required.
  supported_ = 0;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // Probe for runtime features.
  base::CPU cpu;
  if (cpu.implementer() == base::CPU::NVIDIA &&
      cpu.variant() == base::CPU::NVIDIA_DENVER) {
    supported_ |= 1u << COHERENT_CACHE;
  }
}


void CpuFeatures::PrintTarget() { }


void CpuFeatures::PrintFeatures() {
  printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
}


// -----------------------------------------------------------------------------
// CPURegList utilities.

CPURegister CPURegList::PopLowestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    DCHECK(type() == CPURegister::kNoRegister);
    DCHECK(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
// code to index in the safepoint register slots. Any change here can affect
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  UNIMPLEMENTED();
}


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
  return NoReg;
}


bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      DCHECK(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
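
// Illustrative note (not from the original source): AreAliased answers
// "does any register appear more than once?". For example,
//   AreAliased(x0, x1, x0)  // true: x0 is passed twice.
//   AreAliased(x0, x1, d0)  // false: d0 is counted in the separate FP bank.
// Unset trailing arguments are invalid registers and are simply skipped, as
// the DCHECK(!regs[i].IsValid()) branch above shows.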


bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}
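
// Illustrative note (not from the original source): every valid argument is
// compared against reg1, so AreSameSizeAndType(x0, x1) is true, while
// AreSameSizeAndType(x0, w1) (mixed width) and AreSameSizeAndType(x0, d0)
// (mixed bank) are false; invalid arguments are ignored.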


void Immediate::InitializeHandle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    value_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    value_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation(const Assembler* assembler) const {
  RelocInfo::Mode rmode = immediate_.rmode();

  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return assembler->serializer_enabled();
  }

  return !RelocInfo::IsNone(rmode);
}


// Constant Pool.
void ConstPool::RecordEntry(intptr_t data,
                            RelocInfo::Mode mode) {
  DCHECK(mode != RelocInfo::COMMENT &&
         mode != RelocInfo::POSITION &&
         mode != RelocInfo::STATEMENT_POSITION &&
         mode != RelocInfo::CONST_POOL &&
         mode != RelocInfo::VENEER_POOL &&
         mode != RelocInfo::CODE_AGE_SEQUENCE);

  uint64_t raw_data = static_cast<uint64_t>(data);
  int offset = assm_->pc_offset();
  if (IsEmpty()) {
    first_use_ = offset;
  }

  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
  if (CanBeShared(mode)) {
    shared_entries_.insert(entry);
    if (shared_entries_.count(entry.first) == 1) {
      shared_entries_count++;
    }
  } else {
    unique_entries_.push_back(entry);
  }

  if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
    // Request constant pool emission after the next instruction.
    assm_->SetNextConstPoolCheckIn(1);
  }
}
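
// Illustrative note (not from the original source): whether an entry lands in
// shared_entries_ or unique_entries_ depends on its relocation mode (see
// CanBeShared below). For instance, recording the same raw constant twice
// with RelocInfo::NONE64 reuses one pool slot, whereas entries the
// serializer may need to rewrite stay unique so each load can be relocated
// independently.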


int ConstPool::DistanceToFirstUse() {
  DCHECK(first_use_ >= 0);
  return assm_->pc_offset() - first_use_;
}


int ConstPool::MaxPcOffset() {
  // There are no pending entries in the pool so we can never get out of
  // range.
  if (IsEmpty()) return kMaxInt;

  // Entries are not necessarily emitted in the order they are added so in the
  // worst case the first constant pool use will be accessing the last entry.
  return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}


int ConstPool::WorstCaseSize() {
  if (IsEmpty()) return 0;

  // Max size prologue:
  //   b   over
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop
  // All entries are 64-bit for now.
  return 4 * kInstructionSize + EntryCount() * kPointerSize;
}


int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
  if (IsEmpty()) return 0;

  // Prologue is:
  //   b   over  ;; if require_jump
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop       ;; if not 64-bit aligned
  int prologue_size = require_jump ? kInstructionSize : 0;
  prologue_size += 2 * kInstructionSize;
  prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
                   0 : kInstructionSize;

  // All entries are 64-bit for now.
  return prologue_size + EntryCount() * kPointerSize;
}
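
// Worked example (not from the original source): with kInstructionSize == 4
// and kPointerSize == 8, a pool of 3 entries emitted with require_jump at a
// pc where the entries would otherwise be misaligned needs all 4 prologue
// instructions (b, ldr, blr, nop), so the pool occupies
// 4 * 4 + 3 * 8 = 40 bytes.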


void ConstPool::Emit(bool require_jump) {
  DCHECK(!assm_->is_const_pool_blocked());
  // Prevent recursive pool emission and protect from veneer pools.
  Assembler::BlockPoolsScope block_pools(assm_);

  int size = SizeIfEmittedAtCurrentPc(require_jump);
  Label size_check;
  assm_->bind(&size_check);

  assm_->RecordConstPool(size);
  // Emit the constant pool. It is preceded by an optional branch if
  // require_jump and a header which will:
  //   1) Encode the size of the constant pool, for use by the disassembler.
  //   2) Terminate the program, to try to prevent execution from accidentally
  //      flowing into the constant pool.
  //   3) Align the pool entries to 64-bit.
  // The header is therefore made of up to three arm64 instructions:
  //   ldr xzr, #<size of the constant pool in 32-bit words>
  //   blr xzr
  //   nop
  //
  // If executed, the header will likely segfault and lr will point to the
  // instruction following the offending blr.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  // Emit branch if required.
  Label after_pool;
  if (require_jump) {
    assm_->b(&after_pool);
  }

  // Emit the header.
  assm_->RecordComment("[ Constant Pool");
  EmitMarker();
  EmitGuard();
  assm_->Align(8);

  // Emit constant pool entries.
  // TODO(all): currently each relocated constant is 64 bits, consider adding
  // support for 32-bit entries.
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) {
    assm_->bind(&after_pool);
  }

  DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(size));
}


void ConstPool::Clear() {
  shared_entries_.clear();
  shared_entries_count = 0;
  unique_entries_.clear();
  first_use_ = -1;
}


bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
  // Constant pool currently does not support 32-bit entries.
  DCHECK(mode != RelocInfo::NONE32);

  return RelocInfo::IsNone(mode) ||
         (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
}

void ConstPool::EmitMarker() {
  // A constant pool size is expressed in number of 32-bit words.
  // Currently all entries are 64-bit.
  // + 1 is for the crash guard.
  // + 0/1 for alignment.
  int word_count = EntryCount() * 2 + 1 +
                   (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
  assm_->Emit(LDR_x_lit |
              Assembler::ImmLLiteral(word_count) |
              Assembler::Rt(xzr));
}
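
// Worked example (not from the original source): a pool holding 3 64-bit
// entries emitted at an 8-byte-aligned pc encodes
// word_count = 3 * 2 + 1 + 0 = 7, so the marker is 'ldr xzr, #7'; at an
// unaligned pc the extra padding word makes it 'ldr xzr, #8'.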


MemOperand::PairResult MemOperand::AreConsistentForPair(
    const MemOperand& operandA,
    const MemOperand& operandB,
    int access_size_log2) {
  DCHECK(access_size_log2 >= 0);
  DCHECK(access_size_log2 <= 3);
  // Step one: check that they share the same base, that the mode is Offset
  // and that the offset is a multiple of access size.
  if (!operandA.base().Is(operandB.base()) ||
      (operandA.addrmode() != Offset) ||
      (operandB.addrmode() != Offset) ||
      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
    return kNotPair;
  }
  // Step two: check that the offsets are contiguous and that the range
  // is OK for ldp/stp.
  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
      is_int7(operandA.offset() >> access_size_log2)) {
    return kPairAB;
  }
  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
      is_int7(operandB.offset() >> access_size_log2)) {
    return kPairBA;
  }
  return kNotPair;
}
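
// Illustrative note (not from the original source): for two 64-bit accesses
// (access_size_log2 == 3) off the same base, offsets 8 and 16 are adjacent
// and within ldp/stp range, so the result is kPairAB; swapping the operands
// yields kPairBA, and offsets 8 and 24 (a gap) yield kNotPair.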


void ConstPool::EmitGuard() {
#ifdef DEBUG
  Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
  DCHECK(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif
  assm_->EmitPoolGuard();
}


void ConstPool::EmitEntries() {
  DCHECK(IsAligned(assm_->pc_offset(), 8));

  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
  SharedEntriesIterator value_it;
  // Iterate through the keys (constant pool values).
  for (value_it = shared_entries_.begin();
       value_it != shared_entries_.end();
       value_it = shared_entries_.upper_bound(value_it->first)) {
    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
    uint64_t data = value_it->first;
    range = shared_entries_.equal_range(data);
    SharedEntriesIterator offset_it;
    // Iterate through the offsets of a given key.
    for (offset_it = range.first; offset_it != range.second; offset_it++) {
      Instruction* instr = assm_->InstructionAt(offset_it->second);

      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
      instr->SetImmPCOffsetTarget(assm_->pc());
    }
    assm_->dc64(data);
  }
  shared_entries_.clear();
  shared_entries_count = 0;

  // Emit unique entries.
  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
  for (unique_it = unique_entries_.begin();
       unique_it != unique_entries_.end();
       unique_it++) {
    Instruction* instr = assm_->InstructionAt(unique_it->second);

    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
    instr->SetImmPCOffsetTarget(assm_->pc());
    assm_->dc64(unique_it->first);
  }
  unique_entries_.clear();
  first_use_ = -1;
}


// Assembler
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      constpool_(this),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  constpool_.Clear();
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(constpool_.IsEmpty());

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                       reloc_info_writer.pos();
    desc->origin = this;
  }
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
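
// Illustrative note (not from the original source): since every instruction
// is 4 bytes, Align(8) emits exactly one nop when pc_offset() % 8 == 4 and
// nothing when the offset is already 8-byte aligned.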


void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
  if (label->is_linked()) {
    static const int kMaxLinksToCheck = 64;  // Avoid O(n^2) behaviour.
    int links_checked = 0;
    int linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      if (++links_checked > kMaxLinksToCheck) break;
      Instruction * link = InstructionAt(linkoffset);
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  DCHECK(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  DCHECK(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
    }

  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the chain.
    prev_link->SetImmPCOffsetTarget(prev_link);

  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(label_veneer);
        link = next_link;
      }
    } else {
      // The assert below will fire.
      // Some other work could be attempted to fix up the chain, but it would
      // be rather complicated. If we crash here, we may want to consider using
      // another mechanism than a chain of branches.
      //
      // Note that this situation currently should not happen, as we always
      // call this function with a veneer to the target label.
      // However this could happen with a MacroAssembler in the following
      // state:
      //   [previous code]
      //   B(label);
      //   [20KB code]
      //   Tbz(label);   // First tbz. Pointing to unconditional branch.
      //   [20KB code]
      //   Tbz(label);   // Second tbz. Pointing to the first tbz.
      //   [more code]
      // and this function is called to remove the first tbz from the label
      // link chain. Since tbz has a range of +-32KB, the second tbz cannot
      // point to the unconditional branch.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
      UNREACHABLE();
    }
  }

  CheckLabelLinkChain(label);
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  DCHECK(!label->is_near_linked());
  DCHECK(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // If the label is linked, the link chain looks something like this:
  //
  //   |--I----I-------I-------L
  //   |---------------------->| pc_offset
  //   |-------------->|         linkoffset = label->pos()
  //           |<------|         link->ImmPCOffset()
  //           |------>|         prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    DCHECK(linkoffset >= 0);
    DCHECK(linkoffset < pc_offset());
    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    DCHECK(prevlinkoffset >= 0);

    // Update the link to point to the label.
    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  DCHECK(label->is_bound());
  DCHECK(!label->is_linked());
}


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  DCHECK(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    DCHECK(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      DCHECK(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}
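
// Illustrative note (not from the original source): a branch to a label bound
// 12 bytes earlier gets offset -12; the first branch to a fresh label gets
// kStartOfLabelLinkChain, and the label then records the branch's pc_offset()
// as the new end of the link chain.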


void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction * link = InstructionAt(link_offset);
    link_pcoffset = link->ImmPCOffset();

    // ADR instructions are not handled by veneers.
    if (link->IsImmBranch()) {
      int max_reachable_pc = InstructionOffset(link) +
          Instruction::ImmBranchRange(link->BranchType());
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be resolved when the label is bound, normally
    // just after all the associated info has been deleted.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks happening by setting the next check to
    // the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    DCHECK(pc_offset() < constpool_.MaxPcOffset());
    // Two cases:
    //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
    //    still blocked
    //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
    //    will trigger a check.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));

  return result;
}


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the constants.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message =
        reinterpret_cast<const char*>(
            instr->InstructionAtOffset(kDebugMessageOffset));
    int size = kDebugMessageOffset + strlen(message) + 1;
    return RoundUp(size, kInstructionSize) / kInstructionSize;
  }
  // Same for printf support, see MacroAssembler::CallPrintf().
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstructionSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}


void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}


void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}


void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}


void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  DCHECK(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  DCHECK(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  DCHECK(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register &rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, NegateCondition(cond));
}
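
// Illustrative note (not from the original source): cset(x0, eq) assembles
// as csinc(x0, xzr, xzr, ne), i.e. x0 = (condition ne holds) ? xzr : xzr + 1,
// so the destination holds 1 when eq is true and 0 otherwise.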


void Assembler::csetm(const Register &rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, NegateCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinc(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinv(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csneg(rd, rn, rn, NegateCondition(cond));
}


void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(rd.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  DCHECK(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}


void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  DCHECK(AreSameSizeAndType(rt, rt2));

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    DCHECK(!rt2.Is(addr.base()));
    DCHECK(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      DCHECK(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  DCHECK(!rt.Is(rt2));
  DCHECK(AreSameSizeAndType(rt, rt2));
  DCHECK(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
      static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}


// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}


void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}


void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}


void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}


void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}


void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}


void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}


void Assembler::str(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, StoreOpFor(rt));
}


void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}


void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
  // constant pool. It should not be emitted.
  DCHECK(!rt.IsZero());
  Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}

void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
  // Currently we only support 64-bit literals.
  DCHECK(rt.Is64Bits());

  RecordRelocInfo(imm.rmode(), imm.value());
  BlockConstPoolFor(1);
  // The load will be patched when the constpool is emitted; the patching code
  // expects a load literal with offset 0.
  ldr_pcrel(rt, 0);
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is
  // used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}
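
// Illustrative note (not from the original source): mov(x0, x1) assembles as
// orr(x0, xzr, x1), while a move involving the stack pointer, such as
// mov(csp, x1), becomes add(csp, x1, #0) because the orr encoding treats
// register 31 as the zero register rather than SP.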


void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  DCHECK(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}


void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  DCHECK(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}


void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}


void Assembler::fmov(FPRegister fd, double imm) {
  DCHECK(fd.Is64Bits());
  DCHECK(IsImmFP64(imm));
  Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
}


void Assembler::fmov(FPRegister fd, float imm) {
  DCHECK(fd.Is32Bits());
  DCHECK(IsImmFP32(imm));
  Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
}


void Assembler::fmov(Register rd, FPRegister fn) {
  DCHECK(rd.SizeInBits() == fn.SizeInBits());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}


void Assembler::fmov(FPRegister fd, Register rn) {
  DCHECK(fd.SizeInBits() == rn.SizeInBits());
  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(fd) | Rn(rn));
}


void Assembler::fmov(FPRegister fd, FPRegister fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}


void Assembler::fadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);
}


void Assembler::fsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);
}


void Assembler::fmul(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);
}


void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}


void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}


void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}


void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}


void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}


void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}


void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}


void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}


void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}


void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}


void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);
}


void Assembler::fsqrt(const FPRegister& fd,
                      const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);
}


void Assembler::frinta(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);
}


void Assembler::frintm(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTM);
}


void Assembler::frintn(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);
}


void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTP);
}


void Assembler::frintz(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
}


void Assembler::fcmp(const FPRegister& fn,
                     const FPRegister& fm) {
  DCHECK(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}


void Assembler::fcmp(const FPRegister& fn,
                     double value) {
  USE(value);
  // Although the fcmp instruction can strictly only take an immediate value of
  // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
  // affect the result of the comparison.
  DCHECK(value == 0.0);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}


void Assembler::fccmp(const FPRegister& fn,
                      const FPRegister& fm,
                      StatusFlags nzcv,
                      Condition cond) {
  DCHECK(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}


void Assembler::fcsel(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      Condition cond) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}


void Assembler::FPConvertToInt(const Register& rd,
                               const FPRegister& fn,
                               FPIntegerConvertOp op) {
  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
}


void Assembler::fcvt(const FPRegister& fd,
                     const FPRegister& fn) {
  if (fd.Is64Bits()) {
    // Convert float to double.
2008 DCHECK(fn.Is32Bits());
2009 FPDataProcessing1Source(fd, fn, FCVT_ds);
2010 } else {
2011 // Convert double to float.
2012 DCHECK(fn.Is64Bits());
2013 FPDataProcessing1Source(fd, fn, FCVT_sd);
2014 }
2015}
2016
2017
2018void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
2019 FPConvertToInt(rd, fn, FCVTAU);
2020}
2021
2022
2023void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
2024 FPConvertToInt(rd, fn, FCVTAS);
2025}
2026
2027
2028void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
2029 FPConvertToInt(rd, fn, FCVTMU);
2030}
2031
2032
2033void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
2034 FPConvertToInt(rd, fn, FCVTMS);
2035}
2036
2037
2038void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
2039 FPConvertToInt(rd, fn, FCVTNU);
2040}
2041
2042
2043void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
2044 FPConvertToInt(rd, fn, FCVTNS);
2045}
2046
2047
2048void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
2049 FPConvertToInt(rd, fn, FCVTZU);
2050}
2051
2052
2053void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
2054 FPConvertToInt(rd, fn, FCVTZS);
2055}
2056
2057
2058void Assembler::scvtf(const FPRegister& fd,
2059 const Register& rn,
2060 unsigned fbits) {
2061 if (fbits == 0) {
2062 Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
2063 } else {
2064 Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2065 Rd(fd));
2066 }
2067}
2068
2069
2070void Assembler::ucvtf(const FPRegister& fd,
2071 const Register& rn,
2072 unsigned fbits) {
2073 if (fbits == 0) {
2074 Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
2075 } else {
2076 Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
2077 Rd(fd));
2078 }
2079}
2080
2081
2082// Note:
2083// Below, a difference in case for the same letter indicates a
2084// negated bit.
2085// If b is 1, then B is 0.
2086Instr Assembler::ImmFP32(float imm) {
2087 DCHECK(IsImmFP32(imm));
2088 // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
2089 uint32_t bits = float_to_rawbits(imm);
2090 // bit7: a000.0000
2091 uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
2092 // bit6: 0b00.0000
2093 uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
2094 // bit5_to_0: 00cd.efgh
2095 uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
2096
2097 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
2098}
2099
2100
2101Instr Assembler::ImmFP64(double imm) {
2102 DCHECK(IsImmFP64(imm));
2103 // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
2104 // 0000.0000.0000.0000.0000.0000.0000.0000
2105 uint64_t bits = double_to_rawbits(imm);
2106 // bit7: a000.0000
2107 uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
2108 // bit6: 0b00.0000
2109 uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
2110 // bit5_to_0: 00cd.efgh
2111 uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
2112
2113 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
2114}
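

// A worked example of the encoding above: 1.0f has raw bits 0x3f800000, so
// bit7 = 0, bit6 = 0x40 and bit5_to_0 = 0x30, giving imm8 = 0x70 in the
// FMOV (immediate) field; ImmFP64(1.0) derives the same imm8 = 0x70 from
// raw bits 0x3ff0000000000000.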


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  // Ignore the top 32 bits of an immediate if we're moving to a W register.
  if (rd.Is32Bits()) {
    // Check that the top 32 bits are zero (a positive 32-bit number) or top
    // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
    DCHECK(((imm >> kWRegSizeInBits) == 0) ||
           ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
    imm &= kWRegMask;
  }

  if (shift >= 0) {
    // Explicit shift specified.
    DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
    DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & ~0xffffUL) == 0) {
      // Nothing to do.
    } else if ((imm & ~(0xffffUL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xffffUL << 32)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xffffUL << 48)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  DCHECK(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}
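

// For illustration: with no explicit shift (shift == -1),
// MoveWide(x0, 0x12340000, -1, MOVZ) matches the (0xffffUL << 16) case
// above and is encoded as imm16 = 0x1234 with hw shift 1, i.e.
// movz x0, #0x1234, lsl #16.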


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted
    // to extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      DCHECK(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    DCHECK(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}
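

// For illustration (csp here denotes the stack-pointer-capable register):
// add(x0, csp, Operand(x1, LSL, 2)) cannot use the shifted-register
// encoding, so the operand is rewritten via ToExtendedRegister() and the
// extended-register form add x0, csp, x1, UXTX #2 is emitted instead.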


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
  DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  DCHECK(is_uint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  DCHECK(is_uint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::EmitStringData(const char* string) {
  size_t len = strlen(string) + 1;
  DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
  EmitData(string, len);
  // Pad with NULL characters until pc_ is aligned.
  const char pad[] = {'\0', '\0', '\0', '\0'};
  STATIC_ASSERT(sizeof(pad) == kInstructionSize);
  EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
}


void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
  // Don't generate simulator-specific code if we are building a snapshot,
  // which might be run on real hardware.
  if (!serializer_enabled()) {
    // The arguments to the debug marker need to be contiguous in memory, so
    // make sure we don't try to emit pools.
    BlockPoolsScope scope(this);

    Label start;
    bind(&start);

    // Refer to instructions-arm64.h for a description of the marker and its
    // arguments.
    hlt(kImmExceptionIsDebug);
    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
    dc32(code);
    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
    dc32(params);
    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
    EmitStringData(message);
    hlt(kImmExceptionIsUnreachable);

    return;
  }
  // Fall through if Serializer is enabled.
#endif

  if (params & BREAK) {
    hlt(kImmExceptionIsDebug);
  }
}


void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    DCHECK(immediate != 0);
    DCHECK(immediate != -1);
    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else {
    DCHECK(operand.IsShiftedRegister());
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}
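

// For illustration of the NOT folding above: when the NOT variant of an
// operation (e.g. BIC for AND) is given an immediate, the immediate is
// inverted first, so a 64-bit BIC of 0xffff becomes an AND with
// 0xffffffffffff0000, itself encodable as a logical immediate
// (n = 1, imm_s = 0b101111, imm_r = 48).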


void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.SizeInBits();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}


void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
  } else {
    DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}


void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}


void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}


void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      UNREACHABLE();
  }
}


void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  DCHECK(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();
  // Use the correct size of register.
  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX: {
        DCHECK(rn.SizeInBits() == kXRegSizeInBits);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default: UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}
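

// For illustration: EmitExtendShift(x0, w1, SXTW, 0) computes high_bit = 31
// and non_shift_bits = 0, so it emits sbfm(x0, x1, 0, 31), i.e. a plain
// sxtw. With UXTW and a left shift of 3, non_shift_bits = 61 > high_bit,
// so ubfm(x0, x1, 61, 31) is emitted instead (ubfiz x0, x1, #3, #32).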


void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Register& rn,
                                        const Operand& operand,
                                        FlagsUpdate S,
                                        Instr op) {
  DCHECK(operand.IsShiftedRegister());
  DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) |
       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
       Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
  DCHECK(!operand.NeedsRelocation(this));
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}


bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}
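

// For illustration: IsImmAddSub(0xabc) and IsImmAddSub(0xabc000) both hold
// (the latter via the LSL #12 form), while IsImmAddSub(0xabc001) does not;
// such immediates have to be materialized by the macro assembler first.
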

void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  int64_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    DCHECK((shift_amount == 0) ||
           (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        DCHECK(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  }
}
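

// For illustration: for a 64-bit load, an immediate offset of 8 uses the
// scaled mode (imm12 = 8 >> 3 = 1) and an offset of -8 the unscaled mode
// (imm9 = -8), while an offset such as 257 fits neither encoding and must
// be synthesized by the macro assembler before reaching LoadStore.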


bool Assembler::IsImmLSUnscaled(int64_t offset) {
  return is_int9(offset);
}


bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}


bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_int7(offset >> size);
}


// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
  DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));

  bool negate = false;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1 bits
  // are set. The pattern is rotated right by R, and repeated across a 32 or
  // 64-bit value, depending on destination register width.
  //
  // Put another way: the basic format of a logical immediate is a single
  // contiguous stretch of 1 bits, repeated across the whole word at intervals
  // given by a power of 2. To identify them quickly, we first locate the
  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
  // is different for every logical immediate, so it gives us all the
  // information we need to identify the only logical immediate that our input
  // could be, and then we simply check if that's the value we actually have.
  //
  // (The rotation parameter does give the possibility of the stretch of 1 bits
  // going 'round the end' of the word. To deal with that, we observe that in
  // any situation where that happens the bitwise NOT of the value is also a
  // valid logical immediate. So we simply invert the input whenever its low bit
  // is set, and then we know that the rotated case can't arise.)

  if (value & 1) {
    // If the low bit is 1, negate the value, and set a flag to remember that we
    // did (so that we can adjust the return values appropriately).
    negate = true;
    value = ~value;
  }

  if (width == kWRegSizeInBits) {
    // To handle 32-bit logical immediates, the very easiest thing is to repeat
    // the input value twice to make a 64-bit word. The correct encoding of that
    // as a logical immediate will also be the correct encoding of the 32-bit
    // value.

    // The most-significant 32 bits may not be zero (i.e. negate is true) so
    // shift the value left before duplicating it.
    value <<= kWRegSizeInBits;
    value |= value >> kWRegSizeInBits;
  }

  // The basic analysis idea: imagine our input word looks like this.
  //
  //    0011111000111110001111100011111000111110001111100011111000111110
  //                                                          c  b    a
  //                                                          |<--d-->|
  //
  // We find the lowest set bit (as an actual power-of-2 value, not its index)
  // and call it a. Then we add a to our original number, which wipes out the
  // bottommost stretch of set bits and replaces it with a 1 carried into the
  // next zero bit. Then we look for the new lowest set bit, which is in
  // position b, and subtract it, so now our number is just like the original
  // but with the lowest stretch of set bits completely gone. Now we find the
  // lowest set bit again, which is position c in the diagram above. Then we'll
  // measure the distance d between bit positions a and c (using CLZ), and that
  // tells us that the only valid logical immediate that could possibly be equal
  // to this number is the one in which a stretch of bits running from a to just
  // below b is replicated every d bits.
  uint64_t a = LargestPowerOf2Divisor(value);
  uint64_t value_plus_a = value + a;
  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
  uint64_t value_plus_a_minus_b = value_plus_a - b;
  uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);

  int d, clz_a, out_n;
  uint64_t mask;

  if (c != 0) {
    // The general case, in which there is more than one stretch of set bits.
    // Compute the repeat distance d, and set up a bitmask covering the basic
    // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
    // of these cases the N bit of the output will be zero.
    clz_a = CountLeadingZeros(a, kXRegSizeInBits);
    int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
    d = clz_a - clz_c;
    mask = ((V8_UINT64_C(1) << d) - 1);
    out_n = 0;
  } else {
    // Handle degenerate cases.
    //
    // If any of those 'find lowest set bit' operations didn't find a set bit at
    // all, then the word will have been zero thereafter, so in particular the
    // last lowest_set_bit operation will have returned zero. So we can test for
    // all the special case conditions in one go by seeing if c is zero.
    if (a == 0) {
      // The input was zero (or all 1 bits, which will come to here too after we
      // inverted it at the start of the function), for which we just return
      // false.
      return false;
    } else {
      // Otherwise, if c was zero but a was not, then there's just one stretch
      // of set bits in our word, meaning that we have the trivial case of
      // d == 64 and only one 'repetition'. Set up all the same variables as in
      // the general case above, and set the N bit in the output.
      clz_a = CountLeadingZeros(a, kXRegSizeInBits);
      d = 64;
      mask = ~V8_UINT64_C(0);
      out_n = 1;
    }
  }

  // If the repeat period d is not a power of two, it can't be encoded.
  if (!IS_POWER_OF_TWO(d)) {
    return false;
  }

  if (((b - a) & ~mask) != 0) {
    // If the bit stretch (b - a) does not fit within the mask derived from the
    // repeat period, then fail.
    return false;
  }

  // The only possible option is b - a repeated every d bits. Now we're going to
  // actually construct the valid logical immediate derived from that
  // specification, and see if it equals our original input.
  //
  // To repeat a value every d bits, we multiply it by a number of the form
  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
  // be derived using a table lookup on CLZ(d).
  static const uint64_t multipliers[] = {
    0x0000000000000001UL,
    0x0000000100000001UL,
    0x0001000100010001UL,
    0x0101010101010101UL,
    0x1111111111111111UL,
    0x5555555555555555UL,
  };
  int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
  // Ensure that the index to the multipliers array is within bounds.
  DCHECK((multiplier_idx >= 0) &&
         (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
  uint64_t multiplier = multipliers[multiplier_idx];
  uint64_t candidate = (b - a) * multiplier;

  if (value != candidate) {
    // The candidate pattern doesn't match our input value, so fail.
    return false;
  }

  // We have a match! This is a valid logical immediate, so now we have to
  // construct the bits and pieces of the instruction encoding that generates
  // it.

  // Count the set bits in our basic stretch. The special case of clz(0) == -1
  // makes the answer come out right for stretches that reach the very top of
  // the word (e.g. numbers like 0xffffc00000000000).
  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
  int s = clz_a - clz_b;

  // Decide how many bits to rotate right by, to put the low bit of that basic
  // stretch in position a.
  int r;
  if (negate) {
    // If we inverted the input right at the start of this function, here's
    // where we compensate: the number of set bits becomes the number of clear
    // bits, and the rotation count is based on position b rather than position
    // a (since b is the location of the 'lowest' 1 bit after inversion).
    s = d - s;
    r = (clz_b + 1) & (d - 1);
  } else {
    r = (clz_a + 1) & (d - 1);
  }

  // Now we're done, except for having to encode the S output in such a way that
  // it gives both the number of set bits and the length of the repeated
  // segment. The s field is encoded like this:
  //
  //     imms    size        S
  //    ssssss    64    UInt(ssssss)
  //    0sssss    32    UInt(sssss)
  //    10ssss    16    UInt(ssss)
  //    110sss     8    UInt(sss)
  //    1110ss     4    UInt(ss)
  //    11110s     2    UInt(s)
  //
  // So we 'or' (-d << 1) with our computed s - 1 to form imms.
  *n = out_n;
  *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
  *imm_r = r;

  return true;
}
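

// A worked example: for value 0x0f0f0f0f0f0f0f0f the low bit is set, so the
// input is inverted to 0xf0f0f0f0f0f0f0f0; then a = 0x10, b = 0x100 and
// c = 0x1000, giving d = 8 and a four-bit stretch per repetition. The
// outputs are n = 0, imm_s = 0b110011 and imm_r = 0, matching the size-8
// row of the table above.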


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}
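

// For illustration: only immediates 0 through 31 fit the 5-bit field, so
// ccmp(x0, Operand(31), ...) can take the immediate form, while a
// comparison against 32 has to go through a register.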


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  //   aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  //   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //   0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & 0xffffffffffffL) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
    return false;
  }

  return true;
}
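

// For illustration: these predicates accept exactly the FMOV (immediate)
// set, i.e. values of the form +/-(n/16) * 2^r with 16 <= n <= 31 and
// -3 <= r <= 4. So IsImmFP32(2.0f) holds (raw bits 0x40000000), while
// IsImmFP32(0.1f) does not (raw bits 0x3dcccccd have low mantissa bits
// set).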


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  byte* buffer = reinterpret_cast<byte*>(buffer_);

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Pending relocation entries are also relative, no need to relocate.
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      (rmode == RelocInfo::VENEER_POOL)) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || RelocInfo::IsVeneerPool(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    constpool_.RecordEntry(data, rmode);
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }

  if (!RelocInfo::IsNone(rmode)) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(
          reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;
  if (no_const_pool_before_ < pc_limit) {
    no_const_pool_before_ = pc_limit;
    // Make sure the pool won't be blocked for too long.
    DCHECK(pc_limit < constpool_.MaxPcOffset());
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and by BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (constpool_.IsEmpty()) {
    // Calculate the offset of the next check.
    SetNextConstPoolCheckIn(kCheckConstPoolInterval);
    return;
  }

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance to the first instruction accessing the constant pool is
  //    kApproxMaxDistToConstPool or more.
  //  * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
  int dist = constpool_.DistanceToFirstUse();
  int count = constpool_.EntryCount();
  if (!force_emit &&
      (dist < kApproxMaxDistToConstPool) &&
      (count < kApproxMaxPoolEntryCount)) {
    return;
  }

  // Emit veneers for branches that would go out of range during emission of the
  // constant pool.
  int worst_case_size = constpool_.WorstCaseSize();
  CheckVeneerPool(false, require_jump,
                  kVeneerDistanceMargin + worst_case_size);

  // Check that the code buffer is large enough before emitting the constant
  // pool (this includes the gap to the relocation information).
  int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  Label size_check;
  bind(&size_check);
  constpool_.Emit(require_jump);
  DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
         static_cast<unsigned>(worst_case_size));

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}


bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  // Account for the branch around the veneers and the guard.
  int protection_offset = 2 * kInstructionSize;
  return pc_offset() > max_reachable_pc - margin - protection_offset -
         static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}


void Assembler::RecordVeneerPool(int location_offset, int size) {
  RelocInfo rinfo(buffer_ + location_offset,
                  RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
                  NULL);
  reloc_info_writer.Write(&rinfo);
}


void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
  BlockPoolsScope scope(this);
  RecordComment("[ Veneers");

  // The exact size of the veneer pool must be recorded (see the comment at the
  // declaration site of RecordConstPool()), but computing the number of
  // veneers that will be generated is not obvious. So instead we remember the
  // current position and will record the size after the pool has been
  // generated.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    b(&end);
  }

  EmitVeneersGuard();

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    if (force_emit || ShouldEmitVeneer(it->first, margin)) {
      Instruction* branch = InstructionAt(it->second.pc_offset_);
      Label* label = it->second.label_;

#ifdef DEBUG
      bind(&veneer_size_check);
#endif
      // Patch the branch to point to the current position, and emit a branch
      // to the label.
      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
      RemoveBranchFromLabelLinkChain(branch, label, veneer);
      branch->SetImmPCOffsetTarget(veneer);
      b(label);
#ifdef DEBUG
      DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
             static_cast<uint64_t>(kMaxVeneerCodeSize));
      veneer_size_check.Unuse();
#endif

      it_to_delete = it++;
      unresolved_branches_.erase(it_to_delete);
    } else {
      ++it;
    }
  }

  // Record the veneer pool size.
  int pool_size = SizeOfCodeGeneratedSince(&size_check);
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  bind(&end);

  RecordComment("]");
}


void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                int margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  DCHECK(pc_offset() < unresolved_branches_first_limit());

  // Some short sequences of instructions must not be broken up by veneer pool
  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
  // by BlockVeneerPoolScope.
  if (is_veneer_pool_blocked()) {
    DCHECK(!force_emit);
    return;
  }

  if (!require_jump) {
    // Prefer emitting veneers protected by an existing instruction.
    margin *= kVeneerNoProtectionFactor;
  }
  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


int Assembler::buffer_space() const {
  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}


void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in the
  // code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return;
}


void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
  // The code at the current instruction should be:
  //   adr  rd, 0
  //   nop  (adr_far)
  //   nop  (adr_far)
  //   movz scratch, 0

  // Verify the expected code.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
  CHECK(expected_movz->IsMovz() &&
        (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();

  // Patch to load the correct address.
  Register rd = Register::XRegFromCode(rd_code);
  Register scratch = Register::XRegFromCode(scratch_code);
  // Addresses are only 48 bits.
  adr(rd, target_offset & 0xFFFF);
  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
  DCHECK((target_offset >> 48) == 0);
  add(rd, rd, scratch);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64