// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/register-configuration.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures implementation.

void CpuFeatures::ProbeImpl(bool cross_compile) {
  // AArch64 has no configuration options, so no further probing is required.
  supported_ = 0;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // We used to probe for coherent cache support, but on older CPUs it
  // causes crashes (crbug.com/524337), and newer CPUs don't even have
  // the feature any more.
}

void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() {}

// -----------------------------------------------------------------------------
// CPURegList utilities.

CPURegister CPURegList::PopLowestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  DCHECK((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    DCHECK(type() == CPURegister::kNoRegister);
    DCHECK(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(int size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(int size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(int size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(int size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the
// stack. MacroAssembler::SafepointRegisterStackIndex handles mapping from
// register code to index in the safepoint register slots. Any change here can
// affect this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}

Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}

uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}

Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}

void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}

void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
                                                  ICacheFlushMode flush_mode) {
  Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
}

Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    Register candidate = Register::from_code(code);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
  return NoReg;
}


bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      DCHECK(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
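
// A usage sketch (illustrative only, and assuming the NoCPUReg defaults
// declared for the trailing parameters in assembler-arm64.h):
//
//   AreAliased(x0, x1, x2);  // false: all registers are distinct.
//   AreAliased(x0, x1, x0);  // true: x0 appears twice.
//   AreAliased(x0, d0);      // false: general and FP registers are counted
//                            // in separate banks here.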


bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


void Immediate::InitializeHandle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    value_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    value_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation(const Assembler* assembler) const {
  RelocInfo::Mode rmode = immediate_.rmode();

  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return assembler->serializer_enabled();
  }

  return !RelocInfo::IsNone(rmode);
}


// Constant Pool.
void ConstPool::RecordEntry(intptr_t data,
                            RelocInfo::Mode mode) {
  DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::POSITION &&
         mode != RelocInfo::STATEMENT_POSITION &&
         mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
         mode != RelocInfo::CODE_AGE_SEQUENCE &&
         mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
  uint64_t raw_data = static_cast<uint64_t>(data);
  int offset = assm_->pc_offset();
  if (IsEmpty()) {
    first_use_ = offset;
  }

  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
  if (CanBeShared(mode)) {
    shared_entries_.insert(entry);
    if (shared_entries_.count(entry.first) == 1) {
      shared_entries_count++;
    }
  } else {
    unique_entries_.push_back(entry);
  }

  if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
    // Request constant pool emission after the next instruction.
    assm_->SetNextConstPoolCheckIn(1);
  }
}


int ConstPool::DistanceToFirstUse() {
  DCHECK(first_use_ >= 0);
  return assm_->pc_offset() - first_use_;
}


int ConstPool::MaxPcOffset() {
  // There are no pending entries in the pool so we can never get out of
  // range.
  if (IsEmpty()) return kMaxInt;

  // Entries are not necessarily emitted in the order they are added so in the
  // worst case the first constant pool use will be accessing the last entry.
  return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}


int ConstPool::WorstCaseSize() {
  if (IsEmpty()) return 0;

  // Max size prologue:
  //   b   over
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop
  // All entries are 64-bit for now.
  return 4 * kInstructionSize + EntryCount() * kPointerSize;
}
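
// As a worked example (a sketch; on this port kInstructionSize is 4 and
// kPointerSize is 8): with ten pending entries the worst case is
// 4 * 4 + 10 * 8 = 96 bytes, i.e. a four-instruction prologue followed by
// ten 64-bit literals.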


int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
  if (IsEmpty()) return 0;

  // Prologue is:
  //   b   over  ;; if require_jump
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop       ;; if not 64-bit aligned
  int prologue_size = require_jump ? kInstructionSize : 0;
  prologue_size += 2 * kInstructionSize;
  prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
                   0 : kInstructionSize;

  // All entries are 64-bit for now.
  return prologue_size + EntryCount() * kPointerSize;
}


void ConstPool::Emit(bool require_jump) {
  DCHECK(!assm_->is_const_pool_blocked());
  // Prevent recursive pool emission and protect from veneer pools.
  Assembler::BlockPoolsScope block_pools(assm_);

  int size = SizeIfEmittedAtCurrentPc(require_jump);
  Label size_check;
  assm_->bind(&size_check);

  assm_->RecordConstPool(size);
  // Emit the constant pool. It is preceded by an optional branch if
  // require_jump and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) Align the pool entries to 64-bit.
  // The header is therefore made of up to three arm64 instructions:
  //   ldr xzr, #<size of the constant pool in 32-bit words>
  //   blr xzr
  //   nop
  //
  // If executed, the header will likely segfault and lr will point to the
  // instruction following the offending blr.
  // TODO(all): Make the alignment part less fragile. Currently code is
  // allocated as a byte array so there are no guarantees the alignment will
  // be preserved on compaction. Currently it works as allocation seems to be
  // 64-bit aligned.

  // Emit branch if required.
  Label after_pool;
  if (require_jump) {
    assm_->b(&after_pool);
  }

  // Emit the header.
  assm_->RecordComment("[ Constant Pool");
  EmitMarker();
  EmitGuard();
  assm_->Align(8);

  // Emit constant pool entries.
  // TODO(all): currently each relocated constant is 64 bits, consider adding
  // support for 32-bit entries.
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) {
    assm_->bind(&after_pool);
  }

  DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(size));
}


void ConstPool::Clear() {
  shared_entries_.clear();
  shared_entries_count = 0;
  unique_entries_.clear();
  first_use_ = -1;
}


bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
  // Constant pool currently does not support 32-bit entries.
  DCHECK(mode != RelocInfo::NONE32);

  return RelocInfo::IsNone(mode) ||
         (!assm_->serializer_enabled() &&
          (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
}


void ConstPool::EmitMarker() {
  // A constant pool size is expressed as a number of 32-bit words.
  // Currently all entries are 64-bit.
  // + 1 is for the crash guard.
  // + 0/1 for alignment.
  int word_count = EntryCount() * 2 + 1 +
                   (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
  assm_->Emit(LDR_x_lit |
              Assembler::ImmLLiteral(word_count) |
              Assembler::Rt(xzr));
}
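
// For instance (illustrative only): with three 64-bit entries and an 8-byte
// aligned pc, word_count = 3 * 2 + 1 + 0 = 7, so the marker is encoded as
// "ldr xzr, #7": the blr guard plus the six data words that follow it.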


MemOperand::PairResult MemOperand::AreConsistentForPair(
    const MemOperand& operandA,
    const MemOperand& operandB,
    int access_size_log2) {
  DCHECK(access_size_log2 >= 0);
  DCHECK(access_size_log2 <= 3);
  // Step one: check that they share the same base, that the mode is Offset
  // and that the offset is a multiple of access size.
  if (!operandA.base().Is(operandB.base()) ||
      (operandA.addrmode() != Offset) ||
      (operandB.addrmode() != Offset) ||
      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
    return kNotPair;
  }
  // Step two: check that the offsets are contiguous and that the range
  // is OK for ldp/stp.
  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
      is_int7(operandA.offset() >> access_size_log2)) {
    return kPairAB;
  }
  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
      is_int7(operandB.offset() >> access_size_log2)) {
    return kPairBA;
  }
  return kNotPair;
}
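
// For example (a sketch): with access_size_log2 == 3 (64-bit accesses),
// MemOperand(x1, 8) and MemOperand(x1, 16) are adjacent and within ldp/stp
// range, so the result is kPairAB; swapping the arguments gives kPairBA, and
// MemOperand(x1, 8) with MemOperand(x2, 16) is kNotPair because the bases
// differ.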


void ConstPool::EmitGuard() {
#ifdef DEBUG
  Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
  DCHECK(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif
  assm_->EmitPoolGuard();
}


void ConstPool::EmitEntries() {
  DCHECK(IsAligned(assm_->pc_offset(), 8));

  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
  SharedEntriesIterator value_it;
  // Iterate through the keys (constant pool values).
  for (value_it = shared_entries_.begin();
       value_it != shared_entries_.end();
       value_it = shared_entries_.upper_bound(value_it->first)) {
    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
    uint64_t data = value_it->first;
    range = shared_entries_.equal_range(data);
    SharedEntriesIterator offset_it;
    // Iterate through the offsets of a given key.
    for (offset_it = range.first; offset_it != range.second; offset_it++) {
      Instruction* instr = assm_->InstructionAt(offset_it->second);

      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
      instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
    }
    assm_->dc64(data);
  }
  shared_entries_.clear();
  shared_entries_count = 0;

  // Emit unique entries.
  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
  for (unique_it = unique_entries_.begin();
       unique_it != unique_entries_.end();
       unique_it++) {
    Instruction* instr = assm_->InstructionAt(unique_it->second);

    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
    instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
    assm_->dc64(unique_it->first);
  }
  unique_entries_.clear();
  first_use_ = -1;
}


// Assembler
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      constpool_(this),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  constpool_.Clear();
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  reloc_info_writer.Finish();
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(constpool_.IsEmpty());

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size =
        static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                         reloc_info_writer.pos());
    desc->origin = this;
    desc->constant_pool_size = 0;
    desc->unwinding_info_size = 0;
    desc->unwinding_info = nullptr;
  }
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
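
// Note (a sketch of the behaviour above): arm64 instructions are four bytes,
// so Align(8) emits at most one nop and Align(16) at most three. The constant
// pool emission above relies on Align(8) to give the 64-bit pool entries
// their required alignment.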


void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
  if (label->is_linked()) {
    static const int kMaxLinksToCheck = 64;  // Avoid O(n^2) behaviour.
    int links_checked = 0;
    int64_t linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      if (++links_checked > kMaxLinksToCheck) break;
      Instruction * link = InstructionAt(linkoffset);
      int64_t linkpcoffset = link->ImmPCOffset();
      int64_t prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  DCHECK(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  DCHECK(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(
          static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
    }

  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the
    // chain.
    prev_link->SetImmPCOffsetTarget(isolate(), prev_link);

  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(isolate(), next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(isolate(), prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(isolate(), label_veneer);
        link = next_link;
      }
    } else {
      // The assert below will fire.
      // Some other work could be attempted to fix up the chain, but it would
      // be rather complicated. If we crash here, we may want to consider
      // using a mechanism other than a chain of branches.
      //
      // Note that this situation currently should not happen, as we always
      // call this function with a veneer to the target label.
      // However this could happen with a MacroAssembler in the following
      // state:
      //    [previous code]
      //    B(label);
      //    [20KB code]
      //    Tbz(label);   // First tbz. Pointing to unconditional branch.
      //    [20KB code]
      //    Tbz(label);   // Second tbz. Pointing to the first tbz.
      //    [more code]
      // and this function is called to remove the first tbz from the label
      // link chain. Since tbz has a range of +-32KB, the second tbz cannot
      // point to the unconditional branch.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
      UNREACHABLE();
    }
  }

  CheckLabelLinkChain(label);
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  DCHECK(!label->is_near_linked());
  DCHECK(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // If the label is linked, the link chain looks something like this:
  //
  //   |--I----I-------I-------L
  //   |---------------------->| pc_offset
  //   |-------------->|         linkoffset = label->pos()
  //           |<------|         link->ImmPCOffset()
  //           |------>|         prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());

    CheckLabelLinkChain(label);

    DCHECK(linkoffset >= 0);
    DCHECK(linkoffset < pc_offset());
    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    DCHECK(prevlinkoffset >= 0);

    // Update the link to point to the label.
    if (link->IsUnresolvedInternalReference()) {
      // Internal references do not get patched to an instruction but directly
      // to an address.
      internal_reference_positions_.push_back(linkoffset);
      PatchingAssembler patcher(isolate(), link, 2);
      patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
    } else {
      link->SetImmPCOffsetTarget(isolate(),
                                 reinterpret_cast<Instruction*>(pc_));
    }

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  DCHECK(label->is_bound());
  DCHECK(!label->is_linked());
}


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  DCHECK(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    DCHECK(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      DCHECK(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}
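
// For instance (a sketch): if a label was bound at pc_offset() == 8 and the
// current pc_offset() is 24, LinkAndGetByteOffsetTo returns 8 - 24 = -16, a
// backwards PC-relative byte offset for the referring instruction to encode.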


void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction * link = InstructionAt(link_offset);
    link_pcoffset = static_cast<int>(link->ImmPCOffset());

    // ADR instructions are not handled by veneers.
    if (link->IsImmBranch()) {
      int max_reachable_pc =
          static_cast<int>(InstructionOffset(link) +
                           Instruction::ImmBranchRange(link->BranchType()));
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be resolved when the label is bound,
    // normally just after all the associated info has been deleted.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks happening by setting the next check to
    // the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    DCHECK(pc_offset() < constpool_.MaxPcOffset());
    // Two cases:
    //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission
    //    is still blocked
    //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
    //    will trigger a check.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == kZeroRegCode));

  return result;
}


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the constants.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message =
        reinterpret_cast<const char*>(
            instr->InstructionAtOffset(kDebugMessageOffset));
    int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
    return RoundUp(size, kInstructionSize) / kInstructionSize;
  }
  // Same for printf support, see MacroAssembler::CallPrintf().
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstructionSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}


void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}


void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}


void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}


void Assembler::br(const Register& xn) {
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  DCHECK(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  DCHECK(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  DCHECK(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}
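
// As a usage sketch: cmp(x0, Operand(5)) assembles to the same instruction as
// subs(xzr, x0, Operand(5)), and cmn(w1, Operand(5)) to adds(wzr, w1,
// Operand(5)); the comparisons are flag-setting arithmetic whose result is
// discarded into the appropriate zero register.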
| 1129 | |
| 1130 | |
| 1131 | void Assembler::neg(const Register& rd, const Operand& operand) { |
| 1132 | Register zr = AppropriateZeroRegFor(rd); |
| 1133 | sub(rd, zr, operand); |
| 1134 | } |
| 1135 | |
| 1136 | |
| 1137 | void Assembler::negs(const Register& rd, const Operand& operand) { |
| 1138 | Register zr = AppropriateZeroRegFor(rd); |
| 1139 | subs(rd, zr, operand); |
| 1140 | } |
| 1141 | |
| 1142 | |
| 1143 | void Assembler::adc(const Register& rd, |
| 1144 | const Register& rn, |
| 1145 | const Operand& operand) { |
| 1146 | AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC); |
| 1147 | } |
| 1148 | |
| 1149 | |
| 1150 | void Assembler::adcs(const Register& rd, |
| 1151 | const Register& rn, |
| 1152 | const Operand& operand) { |
| 1153 | AddSubWithCarry(rd, rn, operand, SetFlags, ADC); |
| 1154 | } |
| 1155 | |
| 1156 | |
| 1157 | void Assembler::sbc(const Register& rd, |
| 1158 | const Register& rn, |
| 1159 | const Operand& operand) { |
| 1160 | AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC); |
| 1161 | } |
| 1162 | |
| 1163 | |
| 1164 | void Assembler::sbcs(const Register& rd, |
| 1165 | const Register& rn, |
| 1166 | const Operand& operand) { |
| 1167 | AddSubWithCarry(rd, rn, operand, SetFlags, SBC); |
| 1168 | } |
| 1169 | |
| 1170 | |
| 1171 | void Assembler::ngc(const Register& rd, const Operand& operand) { |
| 1172 | Register zr = AppropriateZeroRegFor(rd); |
| 1173 | sbc(rd, zr, operand); |
| 1174 | } |
| 1175 | |
| 1176 | |
| 1177 | void Assembler::ngcs(const Register& rd, const Operand& operand) { |
| 1178 | Register zr = AppropriateZeroRegFor(rd); |
| 1179 | sbcs(rd, zr, operand); |
| 1180 | } |
| 1181 | |
| 1182 | |
| 1183 | // Logical instructions. |
| 1184 | void Assembler::and_(const Register& rd, |
| 1185 | const Register& rn, |
| 1186 | const Operand& operand) { |
| 1187 | Logical(rd, rn, operand, AND); |
| 1188 | } |
| 1189 | |
| 1190 | |
| 1191 | void Assembler::ands(const Register& rd, |
| 1192 | const Register& rn, |
| 1193 | const Operand& operand) { |
| 1194 | Logical(rd, rn, operand, ANDS); |
| 1195 | } |
| 1196 | |
| 1197 | |
| 1198 | void Assembler::tst(const Register& rn, |
| 1199 | const Operand& operand) { |
| 1200 | ands(AppropriateZeroRegFor(rn), rn, operand); |
| 1201 | } |
| 1202 | |
| 1203 | |
| 1204 | void Assembler::bic(const Register& rd, |
| 1205 | const Register& rn, |
| 1206 | const Operand& operand) { |
| 1207 | Logical(rd, rn, operand, BIC); |
| 1208 | } |
| 1209 | |
| 1210 | |
| 1211 | void Assembler::bics(const Register& rd, |
| 1212 | const Register& rn, |
| 1213 | const Operand& operand) { |
| 1214 | Logical(rd, rn, operand, BICS); |
| 1215 | } |
| 1216 | |
| 1217 | |
| 1218 | void Assembler::orr(const Register& rd, |
| 1219 | const Register& rn, |
| 1220 | const Operand& operand) { |
| 1221 | Logical(rd, rn, operand, ORR); |
| 1222 | } |
| 1223 | |
| 1224 | |
| 1225 | void Assembler::orn(const Register& rd, |
| 1226 | const Register& rn, |
| 1227 | const Operand& operand) { |
| 1228 | Logical(rd, rn, operand, ORN); |
| 1229 | } |
| 1230 | |
| 1231 | |
| 1232 | void Assembler::eor(const Register& rd, |
| 1233 | const Register& rn, |
| 1234 | const Operand& operand) { |
| 1235 | Logical(rd, rn, operand, EOR); |
| 1236 | } |
| 1237 | |
| 1238 | |
| 1239 | void Assembler::eon(const Register& rd, |
| 1240 | const Register& rn, |
| 1241 | const Operand& operand) { |
| 1242 | Logical(rd, rn, operand, EON); |
| 1243 | } |
| 1244 | |
| 1245 | |
| 1246 | void Assembler::lslv(const Register& rd, |
| 1247 | const Register& rn, |
| 1248 | const Register& rm) { |
| 1249 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1250 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1251 | Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); |
| 1252 | } |
| 1253 | |
| 1254 | |
| 1255 | void Assembler::lsrv(const Register& rd, |
| 1256 | const Register& rn, |
| 1257 | const Register& rm) { |
| 1258 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1259 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1260 | Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); |
| 1261 | } |
| 1262 | |
| 1263 | |
| 1264 | void Assembler::asrv(const Register& rd, |
| 1265 | const Register& rn, |
| 1266 | const Register& rm) { |
| 1267 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1268 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1269 | Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); |
| 1270 | } |
| 1271 | |
| 1272 | |
| 1273 | void Assembler::rorv(const Register& rd, |
| 1274 | const Register& rn, |
| 1275 | const Register& rm) { |
| 1276 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1277 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1278 | Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); |
| 1279 | } |
| 1280 | |
| 1281 | |
| 1282 | // Bitfield operations. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1283 | void Assembler::bfm(const Register& rd, const Register& rn, int immr, |
| 1284 | int imms) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1285 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1286 | Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| 1287 | Emit(SF(rd) | BFM | N | |
| 1288 | ImmR(immr, rd.SizeInBits()) | |
| 1289 | ImmS(imms, rn.SizeInBits()) | |
| 1290 | Rn(rn) | Rd(rd)); |
| 1291 | } |
| 1292 | |
| 1293 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1294 | void Assembler::sbfm(const Register& rd, const Register& rn, int immr, |
| 1295 | int imms) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1296 | DCHECK(rd.Is64Bits() || rn.Is32Bits()); |
| 1297 | Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| 1298 | Emit(SF(rd) | SBFM | N | |
| 1299 | ImmR(immr, rd.SizeInBits()) | |
| 1300 | ImmS(imms, rn.SizeInBits()) | |
| 1301 | Rn(rn) | Rd(rd)); |
| 1302 | } |
| 1303 | |
| 1304 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1305 | void Assembler::ubfm(const Register& rd, const Register& rn, int immr, |
| 1306 | int imms) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1307 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1308 | Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| 1309 | Emit(SF(rd) | UBFM | N | |
| 1310 | ImmR(immr, rd.SizeInBits()) | |
| 1311 | ImmS(imms, rn.SizeInBits()) | |
| 1312 | Rn(rn) | Rd(rd)); |
| 1313 | } |
| 1314 | |
| 1315 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1316 | void Assembler::extr(const Register& rd, const Register& rn, const Register& rm, |
| 1317 | int lsb) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1318 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1319 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1320 | Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| 1321 | Emit(SF(rd) | EXTR | N | Rm(rm) | |
| 1322 | ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd)); |
| 1323 | } |
| 1324 | |
| 1325 | |
| 1326 | void Assembler::csel(const Register& rd, |
| 1327 | const Register& rn, |
| 1328 | const Register& rm, |
| 1329 | Condition cond) { |
| 1330 | ConditionalSelect(rd, rn, rm, cond, CSEL); |
| 1331 | } |
| 1332 | |
| 1333 | |
| 1334 | void Assembler::csinc(const Register& rd, |
| 1335 | const Register& rn, |
| 1336 | const Register& rm, |
| 1337 | Condition cond) { |
| 1338 | ConditionalSelect(rd, rn, rm, cond, CSINC); |
| 1339 | } |
| 1340 | |
| 1341 | |
| 1342 | void Assembler::csinv(const Register& rd, |
| 1343 | const Register& rn, |
| 1344 | const Register& rm, |
| 1345 | Condition cond) { |
| 1346 | ConditionalSelect(rd, rn, rm, cond, CSINV); |
| 1347 | } |
| 1348 | |
| 1349 | |
| 1350 | void Assembler::csneg(const Register& rd, |
| 1351 | const Register& rn, |
| 1352 | const Register& rm, |
| 1353 | Condition cond) { |
| 1354 | ConditionalSelect(rd, rn, rm, cond, CSNEG); |
| 1355 | } |
| 1356 | |
| 1357 | |
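// The conditional-set and conditional-increment aliases below are built on
// the conditional select instructions above. For example, cset(rd, cond)
// emits csinc(rd, zr, zr, !cond), leaving 1 in rd when cond holds and 0
// otherwise, while csetm(rd, cond) uses csinv and leaves -1 instead of 1.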
| 1358 | void Assembler::cset(const Register &rd, Condition cond) { |
| 1359 | DCHECK((cond != al) && (cond != nv)); |
| 1360 | Register zr = AppropriateZeroRegFor(rd); |
| 1361 | csinc(rd, zr, zr, NegateCondition(cond)); |
| 1362 | } |
| 1363 | |
| 1364 | |
| 1365 | void Assembler::csetm(const Register &rd, Condition cond) { |
| 1366 | DCHECK((cond != al) && (cond != nv)); |
| 1367 | Register zr = AppropriateZeroRegFor(rd); |
| 1368 | csinv(rd, zr, zr, NegateCondition(cond)); |
| 1369 | } |
| 1370 | |
| 1371 | |
| 1372 | void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) { |
| 1373 | DCHECK((cond != al) && (cond != nv)); |
| 1374 | csinc(rd, rn, rn, NegateCondition(cond)); |
| 1375 | } |
| 1376 | |
| 1377 | |
| 1378 | void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) { |
| 1379 | DCHECK((cond != al) && (cond != nv)); |
| 1380 | csinv(rd, rn, rn, NegateCondition(cond)); |
| 1381 | } |
| 1382 | |
| 1383 | |
| 1384 | void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) { |
| 1385 | DCHECK((cond != al) && (cond != nv)); |
| 1386 | csneg(rd, rn, rn, NegateCondition(cond)); |
| 1387 | } |
| 1388 | |
| 1389 | |
| 1390 | void Assembler::ConditionalSelect(const Register& rd, |
| 1391 | const Register& rn, |
| 1392 | const Register& rm, |
| 1393 | Condition cond, |
| 1394 | ConditionalSelectOp op) { |
| 1395 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1396 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1397 | Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); |
| 1398 | } |
| 1399 | |
| 1400 | |
| 1401 | void Assembler::ccmn(const Register& rn, |
| 1402 | const Operand& operand, |
| 1403 | StatusFlags nzcv, |
| 1404 | Condition cond) { |
| 1405 | ConditionalCompare(rn, operand, nzcv, cond, CCMN); |
| 1406 | } |
| 1407 | |
| 1408 | |
| 1409 | void Assembler::ccmp(const Register& rn, |
| 1410 | const Operand& operand, |
| 1411 | StatusFlags nzcv, |
| 1412 | Condition cond) { |
| 1413 | ConditionalCompare(rn, operand, nzcv, cond, CCMP); |
| 1414 | } |
| 1415 | |
| 1416 | |
| 1417 | void Assembler::DataProcessing3Source(const Register& rd, |
| 1418 | const Register& rn, |
| 1419 | const Register& rm, |
| 1420 | const Register& ra, |
| 1421 | DataProcessing3SourceOp op) { |
| 1422 | Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); |
| 1423 | } |
| 1424 | |
| 1425 | |
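// The multiply helpers below reuse the three-source data-processing group:
// for example, mul(rd, rn, rm) emits madd(rd, rn, rm, zr), i.e. rd = rn * rm
// with a zero accumulator, and mneg emits the corresponding msub form.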
| 1426 | void Assembler::mul(const Register& rd, |
| 1427 | const Register& rn, |
| 1428 | const Register& rm) { |
| 1429 | DCHECK(AreSameSizeAndType(rd, rn, rm)); |
| 1430 | Register zr = AppropriateZeroRegFor(rn); |
| 1431 | DataProcessing3Source(rd, rn, rm, zr, MADD); |
| 1432 | } |
| 1433 | |
| 1434 | |
| 1435 | void Assembler::madd(const Register& rd, |
| 1436 | const Register& rn, |
| 1437 | const Register& rm, |
| 1438 | const Register& ra) { |
| 1439 | DCHECK(AreSameSizeAndType(rd, rn, rm, ra)); |
| 1440 | DataProcessing3Source(rd, rn, rm, ra, MADD); |
| 1441 | } |
| 1442 | |
| 1443 | |
| 1444 | void Assembler::mneg(const Register& rd, |
| 1445 | const Register& rn, |
| 1446 | const Register& rm) { |
| 1447 | DCHECK(AreSameSizeAndType(rd, rn, rm)); |
| 1448 | Register zr = AppropriateZeroRegFor(rn); |
| 1449 | DataProcessing3Source(rd, rn, rm, zr, MSUB); |
| 1450 | } |
| 1451 | |
| 1452 | |
| 1453 | void Assembler::msub(const Register& rd, |
| 1454 | const Register& rn, |
| 1455 | const Register& rm, |
| 1456 | const Register& ra) { |
| 1457 | DCHECK(AreSameSizeAndType(rd, rn, rm, ra)); |
| 1458 | DataProcessing3Source(rd, rn, rm, ra, MSUB); |
| 1459 | } |
| 1460 | |
| 1461 | |
| 1462 | void Assembler::smaddl(const Register& rd, |
| 1463 | const Register& rn, |
| 1464 | const Register& rm, |
| 1465 | const Register& ra) { |
| 1466 | DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| 1467 | DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| 1468 | DataProcessing3Source(rd, rn, rm, ra, SMADDL_x); |
| 1469 | } |
| 1470 | |
| 1471 | |
| 1472 | void Assembler::smsubl(const Register& rd, |
| 1473 | const Register& rn, |
| 1474 | const Register& rm, |
| 1475 | const Register& ra) { |
| 1476 | DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| 1477 | DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| 1478 | DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x); |
| 1479 | } |
| 1480 | |
| 1481 | |
| 1482 | void Assembler::umaddl(const Register& rd, |
| 1483 | const Register& rn, |
| 1484 | const Register& rm, |
| 1485 | const Register& ra) { |
| 1486 | DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| 1487 | DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| 1488 | DataProcessing3Source(rd, rn, rm, ra, UMADDL_x); |
| 1489 | } |
| 1490 | |
| 1491 | |
| 1492 | void Assembler::umsubl(const Register& rd, |
| 1493 | const Register& rn, |
| 1494 | const Register& rm, |
| 1495 | const Register& ra) { |
| 1496 | DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| 1497 | DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| 1498 | DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x); |
| 1499 | } |
| 1500 | |
| 1501 | |
| 1502 | void Assembler::smull(const Register& rd, |
| 1503 | const Register& rn, |
| 1504 | const Register& rm) { |
| 1505 | DCHECK(rd.Is64Bits()); |
| 1506 | DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| 1507 | DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x); |
| 1508 | } |
| 1509 | |
| 1510 | |
| 1511 | void Assembler::smulh(const Register& rd, |
| 1512 | const Register& rn, |
| 1513 | const Register& rm) { |
| 1514 | DCHECK(AreSameSizeAndType(rd, rn, rm)); |
| 1515 | DataProcessing3Source(rd, rn, rm, xzr, SMULH_x); |
| 1516 | } |
| 1517 | |
| 1518 | |
| 1519 | void Assembler::sdiv(const Register& rd, |
| 1520 | const Register& rn, |
| 1521 | const Register& rm) { |
| 1522 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1523 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1524 | Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); |
| 1525 | } |
| 1526 | |
| 1527 | |
| 1528 | void Assembler::udiv(const Register& rd, |
| 1529 | const Register& rn, |
| 1530 | const Register& rm) { |
| 1531 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 1532 | DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| 1533 | Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); |
| 1534 | } |
| 1535 | |
| 1536 | |
| 1537 | void Assembler::rbit(const Register& rd, |
| 1538 | const Register& rn) { |
| 1539 | DataProcessing1Source(rd, rn, RBIT); |
| 1540 | } |
| 1541 | |
| 1542 | |
| 1543 | void Assembler::rev16(const Register& rd, |
| 1544 | const Register& rn) { |
| 1545 | DataProcessing1Source(rd, rn, REV16); |
| 1546 | } |
| 1547 | |
| 1548 | |
| 1549 | void Assembler::rev32(const Register& rd, |
| 1550 | const Register& rn) { |
| 1551 | DCHECK(rd.Is64Bits()); |
| 1552 | DataProcessing1Source(rd, rn, REV); |
| 1553 | } |
| 1554 | |
| 1555 | |
| 1556 | void Assembler::rev(const Register& rd, |
| 1557 | const Register& rn) { |
| 1558 | DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w); |
| 1559 | } |
| 1560 | |
| 1561 | |
| 1562 | void Assembler::clz(const Register& rd, |
| 1563 | const Register& rn) { |
| 1564 | DataProcessing1Source(rd, rn, CLZ); |
| 1565 | } |
| 1566 | |
| 1567 | |
| 1568 | void Assembler::cls(const Register& rd, |
| 1569 | const Register& rn) { |
| 1570 | DataProcessing1Source(rd, rn, CLS); |
| 1571 | } |
| 1572 | |
| 1573 | |
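// A typical use of the pair accessors below (an illustrative sketch, not
// taken from this file): stp(x0, x1, MemOperand(sp, -16, PreIndex)) pushes
// two registers, and ldp(x0, x1, MemOperand(sp, 16, PostIndex)) pops them.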
| 1574 | void Assembler::ldp(const CPURegister& rt, |
| 1575 | const CPURegister& rt2, |
| 1576 | const MemOperand& src) { |
| 1577 | LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2)); |
| 1578 | } |
| 1579 | |
| 1580 | |
| 1581 | void Assembler::stp(const CPURegister& rt, |
| 1582 | const CPURegister& rt2, |
| 1583 | const MemOperand& dst) { |
| 1584 | LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); |
| 1585 | } |
| 1586 | |
| 1587 | |
| 1588 | void Assembler::ldpsw(const Register& rt, |
| 1589 | const Register& rt2, |
| 1590 | const MemOperand& src) { |
| 1591 | DCHECK(rt.Is64Bits()); |
| 1592 | LoadStorePair(rt, rt2, src, LDPSW_x); |
| 1593 | } |
| 1594 | |
| 1595 | |
| 1596 | void Assembler::LoadStorePair(const CPURegister& rt, |
| 1597 | const CPURegister& rt2, |
| 1598 | const MemOperand& addr, |
| 1599 | LoadStorePairOp op) { |
| 1600 | // 'rt' and 'rt2' can only be aliased for stores. |
| 1601 | DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); |
| 1602 | DCHECK(AreSameSizeAndType(rt, rt2)); |
| 1603 | DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
| 1604 | int offset = static_cast<int>(addr.offset());
| 1605 |
| 1606 | Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
| 1607 | ImmLSPair(offset, CalcLSPairDataSize(op));
| 1608 |
| 1609 | Instr addrmodeop; |
| 1610 | if (addr.IsImmediateOffset()) { |
| 1611 | addrmodeop = LoadStorePairOffsetFixed; |
| 1612 | } else { |
| 1613 | // Pre-index and post-index modes. |
| 1614 | DCHECK(!rt.Is(addr.base())); |
| 1615 | DCHECK(!rt2.Is(addr.base())); |
| 1616 | DCHECK(addr.offset() != 0); |
| 1617 | if (addr.IsPreIndex()) { |
| 1618 | addrmodeop = LoadStorePairPreIndexFixed; |
| 1619 | } else { |
| 1620 | DCHECK(addr.IsPostIndex()); |
| 1621 | addrmodeop = LoadStorePairPostIndexFixed; |
| 1622 | } |
| 1623 | } |
| 1624 | Emit(addrmodeop | memop); |
| 1625 | } |
| 1626 | |
| 1627 | |
| 1628 | // Memory instructions.
| 1629 | void Assembler::ldrb(const Register& rt, const MemOperand& src) { |
| 1630 | LoadStore(rt, src, LDRB_w); |
| 1631 | } |
| 1632 | |
| 1633 | |
| 1634 | void Assembler::strb(const Register& rt, const MemOperand& dst) { |
| 1635 | LoadStore(rt, dst, STRB_w); |
| 1636 | } |
| 1637 | |
| 1638 | |
| 1639 | void Assembler::ldrsb(const Register& rt, const MemOperand& src) { |
| 1640 | LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w); |
| 1641 | } |
| 1642 | |
| 1643 | |
| 1644 | void Assembler::ldrh(const Register& rt, const MemOperand& src) { |
| 1645 | LoadStore(rt, src, LDRH_w); |
| 1646 | } |
| 1647 | |
| 1648 | |
| 1649 | void Assembler::strh(const Register& rt, const MemOperand& dst) { |
| 1650 | LoadStore(rt, dst, STRH_w); |
| 1651 | } |
| 1652 | |
| 1653 | |
| 1654 | void Assembler::ldrsh(const Register& rt, const MemOperand& src) { |
| 1655 | LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w); |
| 1656 | } |
| 1657 | |
| 1658 | |
| 1659 | void Assembler::ldr(const CPURegister& rt, const MemOperand& src) { |
| 1660 | LoadStore(rt, src, LoadOpFor(rt)); |
| 1661 | } |
| 1662 | |
| 1663 | |
| 1664 | void Assembler::str(const CPURegister& rt, const MemOperand& src) { |
| 1665 | LoadStore(rt, src, StoreOpFor(rt)); |
| 1666 | } |
| 1667 | |
| 1668 | |
| 1669 | void Assembler::ldrsw(const Register& rt, const MemOperand& src) { |
| 1670 | DCHECK(rt.Is64Bits()); |
| 1671 | LoadStore(rt, src, LDRSW_x); |
| 1672 | } |
| 1673 | |
| 1674 | |
| 1675 | void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) { |
| 1676 | // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a |
| 1677 | // constant pool. It should not be emitted. |
| 1678 | DCHECK(!rt.IsZero()); |
| 1679 | Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt)); |
| 1680 | } |
| 1681 | |
| 1682 | |
| 1683 | void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { |
| 1684 | // Currently we only support 64-bit literals. |
| 1685 | DCHECK(rt.Is64Bits()); |
| 1686 | |
| 1687 | RecordRelocInfo(imm.rmode(), imm.value()); |
| 1688 | BlockConstPoolFor(1); |
| 1689 | // The load will be patched when the constant pool is emitted; the patching
| 1690 | // code expects a load literal with offset 0.
| 1691 | ldr_pcrel(rt, 0); |
| 1692 | } |
| 1693 | |
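// The acquire/release and exclusive accessors below hard-wire the unused Rs
// and Rt2 fields to register 31; only the base address register (rn) is
// required to be an X register.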
| 1694 | void Assembler::ldar(const Register& rt, const Register& rn) {
| 1695 | DCHECK(rn.Is64Bits()); |
| 1696 | LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x; |
| 1697 | Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1698 | } |
| 1699 | |
| 1700 | void Assembler::ldaxr(const Register& rt, const Register& rn) { |
| 1701 | DCHECK(rn.Is64Bits()); |
| 1702 | LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x; |
| 1703 | Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1704 | } |
| 1705 | |
| 1706 | void Assembler::stlr(const Register& rt, const Register& rn) { |
| 1707 | DCHECK(rn.Is64Bits()); |
| 1708 | LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x; |
| 1709 | Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1710 | } |
| 1711 | |
| 1712 | void Assembler::stlxr(const Register& rs, const Register& rt, |
| 1713 | const Register& rn) { |
| 1714 | DCHECK(rs.Is32Bits()); |
| 1715 | DCHECK(rn.Is64Bits()); |
| 1716 | LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x; |
| 1717 | Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1718 | } |
| 1719 | |
| 1720 | void Assembler::ldarb(const Register& rt, const Register& rn) { |
| 1721 | DCHECK(rt.Is32Bits()); |
| 1722 | DCHECK(rn.Is64Bits()); |
| 1723 | Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1724 | } |
| 1725 | |
| 1726 | void Assembler::ldaxrb(const Register& rt, const Register& rn) { |
| 1727 | DCHECK(rt.Is32Bits()); |
| 1728 | DCHECK(rn.Is64Bits()); |
| 1729 | Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1730 | } |
| 1731 | |
| 1732 | void Assembler::stlrb(const Register& rt, const Register& rn) { |
| 1733 | DCHECK(rt.Is32Bits()); |
| 1734 | DCHECK(rn.Is64Bits()); |
| 1735 | Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1736 | } |
| 1737 | |
| 1738 | void Assembler::stlxrb(const Register& rs, const Register& rt, |
| 1739 | const Register& rn) { |
| 1740 | DCHECK(rs.Is32Bits()); |
| 1741 | DCHECK(rt.Is32Bits()); |
| 1742 | DCHECK(rn.Is64Bits()); |
| 1743 | Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1744 | } |
| 1745 | |
| 1746 | void Assembler::ldarh(const Register& rt, const Register& rn) { |
| 1747 | DCHECK(rt.Is32Bits()); |
| 1748 | DCHECK(rn.Is64Bits()); |
| 1749 | Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1750 | } |
| 1751 | |
| 1752 | void Assembler::ldaxrh(const Register& rt, const Register& rn) { |
| 1753 | DCHECK(rt.Is32Bits()); |
| 1754 | DCHECK(rn.Is64Bits()); |
| 1755 | Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1756 | } |
| 1757 | |
| 1758 | void Assembler::stlrh(const Register& rt, const Register& rn) { |
| 1759 | DCHECK(rt.Is32Bits()); |
| 1760 | DCHECK(rn.Is64Bits()); |
| 1761 | Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1762 | } |
| 1763 | |
| 1764 | void Assembler::stlxrh(const Register& rs, const Register& rt, |
| 1765 | const Register& rn) { |
| 1766 | DCHECK(rs.Is32Bits()); |
| 1767 | DCHECK(rt.Is32Bits()); |
| 1768 | DCHECK(rn.Is64Bits()); |
| 1769 | Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt)); |
| 1770 | } |
| 1771 |
| 1772 | void Assembler::mov(const Register& rd, const Register& rm) { |
| 1773 | // Moves involving the stack pointer are encoded as add immediate with |
| 1774 | // second operand of zero. Otherwise, orr with first operand zr is |
| 1775 | // used. |
| 1776 | if (rd.IsSP() || rm.IsSP()) { |
| 1777 | add(rd, rm, 0); |
| 1778 | } else { |
| 1779 | orr(rd, AppropriateZeroRegFor(rd), rm); |
| 1780 | } |
| 1781 | } |
| 1782 | |
| 1783 | |
| 1784 | void Assembler::mvn(const Register& rd, const Operand& operand) { |
| 1785 | orn(rd, AppropriateZeroRegFor(rd), operand); |
| 1786 | } |
| 1787 | |
| 1788 | |
| 1789 | void Assembler::mrs(const Register& rt, SystemRegister sysreg) { |
| 1790 | DCHECK(rt.Is64Bits()); |
| 1791 | Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); |
| 1792 | } |
| 1793 | |
| 1794 | |
| 1795 | void Assembler::msr(SystemRegister sysreg, const Register& rt) { |
| 1796 | DCHECK(rt.Is64Bits()); |
| 1797 | Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); |
| 1798 | } |
| 1799 | |
| 1800 | |
| 1801 | void Assembler::hint(SystemHint code) { |
| 1802 | Emit(HINT | ImmHint(code) | Rt(xzr)); |
| 1803 | } |
| 1804 | |
| 1805 | |
| 1806 | void Assembler::dmb(BarrierDomain domain, BarrierType type) { |
| 1807 | Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); |
| 1808 | } |
| 1809 | |
| 1810 | |
| 1811 | void Assembler::dsb(BarrierDomain domain, BarrierType type) { |
| 1812 | Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); |
| 1813 | } |
| 1814 | |
| 1815 | |
| 1816 | void Assembler::isb() { |
| 1817 | Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); |
| 1818 | } |
| 1819 | |
| 1820 | |
| 1821 | void Assembler::fmov(FPRegister fd, double imm) { |
| 1822 | DCHECK(fd.Is64Bits()); |
| 1823 | DCHECK(IsImmFP64(imm)); |
| 1824 | Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm)); |
| 1825 | } |
| 1826 | |
| 1827 | |
| 1828 | void Assembler::fmov(FPRegister fd, float imm) { |
| 1829 | DCHECK(fd.Is32Bits()); |
| 1830 | DCHECK(IsImmFP32(imm)); |
| 1831 | Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm)); |
| 1832 | } |
| 1833 | |
| 1834 | |
| 1835 | void Assembler::fmov(Register rd, FPRegister fn) { |
| 1836 | DCHECK(rd.SizeInBits() == fn.SizeInBits()); |
| 1837 | FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; |
| 1838 | Emit(op | Rd(rd) | Rn(fn)); |
| 1839 | } |
| 1840 | |
| 1841 | |
| 1842 | void Assembler::fmov(FPRegister fd, Register rn) { |
| 1843 | DCHECK(fd.SizeInBits() == rn.SizeInBits()); |
| 1844 | FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx; |
| 1845 | Emit(op | Rd(fd) | Rn(rn)); |
| 1846 | } |
| 1847 | |
| 1848 | |
| 1849 | void Assembler::fmov(FPRegister fd, FPRegister fn) { |
| 1850 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1851 | Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn)); |
| 1852 | } |
| 1853 | |
| 1854 | |
| 1855 | void Assembler::fadd(const FPRegister& fd, |
| 1856 | const FPRegister& fn, |
| 1857 | const FPRegister& fm) { |
| 1858 | FPDataProcessing2Source(fd, fn, fm, FADD); |
| 1859 | } |
| 1860 | |
| 1861 | |
| 1862 | void Assembler::fsub(const FPRegister& fd, |
| 1863 | const FPRegister& fn, |
| 1864 | const FPRegister& fm) { |
| 1865 | FPDataProcessing2Source(fd, fn, fm, FSUB); |
| 1866 | } |
| 1867 | |
| 1868 | |
| 1869 | void Assembler::fmul(const FPRegister& fd, |
| 1870 | const FPRegister& fn, |
| 1871 | const FPRegister& fm) { |
| 1872 | FPDataProcessing2Source(fd, fn, fm, FMUL); |
| 1873 | } |
| 1874 | |
| 1875 | |
| 1876 | void Assembler::fmadd(const FPRegister& fd, |
| 1877 | const FPRegister& fn, |
| 1878 | const FPRegister& fm, |
| 1879 | const FPRegister& fa) { |
| 1880 | FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d); |
| 1881 | } |
| 1882 | |
| 1883 | |
| 1884 | void Assembler::fmsub(const FPRegister& fd, |
| 1885 | const FPRegister& fn, |
| 1886 | const FPRegister& fm, |
| 1887 | const FPRegister& fa) { |
| 1888 | FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d); |
| 1889 | } |
| 1890 | |
| 1891 | |
| 1892 | void Assembler::fnmadd(const FPRegister& fd, |
| 1893 | const FPRegister& fn, |
| 1894 | const FPRegister& fm, |
| 1895 | const FPRegister& fa) { |
| 1896 | FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d); |
| 1897 | } |
| 1898 | |
| 1899 | |
| 1900 | void Assembler::fnmsub(const FPRegister& fd, |
| 1901 | const FPRegister& fn, |
| 1902 | const FPRegister& fm, |
| 1903 | const FPRegister& fa) { |
| 1904 | FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d); |
| 1905 | } |
| 1906 | |
| 1907 | |
| 1908 | void Assembler::fdiv(const FPRegister& fd, |
| 1909 | const FPRegister& fn, |
| 1910 | const FPRegister& fm) { |
| 1911 | FPDataProcessing2Source(fd, fn, fm, FDIV); |
| 1912 | } |
| 1913 | |
| 1914 | |
| 1915 | void Assembler::fmax(const FPRegister& fd, |
| 1916 | const FPRegister& fn, |
| 1917 | const FPRegister& fm) { |
| 1918 | FPDataProcessing2Source(fd, fn, fm, FMAX); |
| 1919 | } |
| 1920 | |
| 1921 | |
| 1922 | void Assembler::fmaxnm(const FPRegister& fd, |
| 1923 | const FPRegister& fn, |
| 1924 | const FPRegister& fm) { |
| 1925 | FPDataProcessing2Source(fd, fn, fm, FMAXNM); |
| 1926 | } |
| 1927 | |
| 1928 | |
| 1929 | void Assembler::fmin(const FPRegister& fd, |
| 1930 | const FPRegister& fn, |
| 1931 | const FPRegister& fm) { |
| 1932 | FPDataProcessing2Source(fd, fn, fm, FMIN); |
| 1933 | } |
| 1934 | |
| 1935 | |
| 1936 | void Assembler::fminnm(const FPRegister& fd, |
| 1937 | const FPRegister& fn, |
| 1938 | const FPRegister& fm) { |
| 1939 | FPDataProcessing2Source(fd, fn, fm, FMINNM); |
| 1940 | } |
| 1941 | |
| 1942 | |
| 1943 | void Assembler::fabs(const FPRegister& fd, |
| 1944 | const FPRegister& fn) { |
| 1945 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1946 | FPDataProcessing1Source(fd, fn, FABS); |
| 1947 | } |
| 1948 | |
| 1949 | |
| 1950 | void Assembler::fneg(const FPRegister& fd, |
| 1951 | const FPRegister& fn) { |
| 1952 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1953 | FPDataProcessing1Source(fd, fn, FNEG); |
| 1954 | } |
| 1955 | |
| 1956 | |
| 1957 | void Assembler::fsqrt(const FPRegister& fd, |
| 1958 | const FPRegister& fn) { |
| 1959 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1960 | FPDataProcessing1Source(fd, fn, FSQRT); |
| 1961 | } |
| 1962 | |
| 1963 | |
| 1964 | void Assembler::frinta(const FPRegister& fd, |
| 1965 | const FPRegister& fn) { |
| 1966 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1967 | FPDataProcessing1Source(fd, fn, FRINTA); |
| 1968 | } |
| 1969 | |
| 1970 | |
| 1971 | void Assembler::frintm(const FPRegister& fd, |
| 1972 | const FPRegister& fn) { |
| 1973 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1974 | FPDataProcessing1Source(fd, fn, FRINTM); |
| 1975 | } |
| 1976 | |
| 1977 | |
| 1978 | void Assembler::frintn(const FPRegister& fd, |
| 1979 | const FPRegister& fn) { |
| 1980 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1981 | FPDataProcessing1Source(fd, fn, FRINTN); |
| 1982 | } |
| 1983 | |
| 1984 | |
| 1985 | void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
| 1986 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1987 | FPDataProcessing1Source(fd, fn, FRINTP); |
| 1988 | } |
| 1989 | |
| 1990 | |
| 1991 | void Assembler::frintz(const FPRegister& fd,
| 1992 | const FPRegister& fn) { |
| 1993 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 1994 | FPDataProcessing1Source(fd, fn, FRINTZ); |
| 1995 | } |
| 1996 | |
| 1997 | |
| 1998 | void Assembler::fcmp(const FPRegister& fn, |
| 1999 | const FPRegister& fm) { |
| 2000 | DCHECK(fn.SizeInBits() == fm.SizeInBits()); |
| 2001 | Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn)); |
| 2002 | } |
| 2003 | |
| 2004 | |
| 2005 | void Assembler::fcmp(const FPRegister& fn, |
| 2006 | double value) { |
| 2007 | USE(value); |
| 2008 | // Although the fcmp instruction can strictly only take an immediate value of |
| 2009 | // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't |
| 2010 | // affect the result of the comparison. |
| 2011 | DCHECK(value == 0.0); |
| 2012 | Emit(FPType(fn) | FCMP_zero | Rn(fn)); |
| 2013 | } |
| 2014 | |
| 2015 | |
| 2016 | void Assembler::fccmp(const FPRegister& fn, |
| 2017 | const FPRegister& fm, |
| 2018 | StatusFlags nzcv, |
| 2019 | Condition cond) { |
| 2020 | DCHECK(fn.SizeInBits() == fm.SizeInBits()); |
| 2021 | Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); |
| 2022 | } |
| 2023 | |
| 2024 | |
| 2025 | void Assembler::fcsel(const FPRegister& fd, |
| 2026 | const FPRegister& fn, |
| 2027 | const FPRegister& fm, |
| 2028 | Condition cond) { |
| 2029 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 2030 | DCHECK(fd.SizeInBits() == fm.SizeInBits()); |
| 2031 | Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); |
| 2032 | } |
| 2033 | |
| 2034 | |
| 2035 | void Assembler::FPConvertToInt(const Register& rd, |
| 2036 | const FPRegister& fn, |
| 2037 | FPIntegerConvertOp op) { |
| 2038 | Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd)); |
| 2039 | } |
| 2040 | |
| 2041 | |
| 2042 | void Assembler::fcvt(const FPRegister& fd, |
| 2043 | const FPRegister& fn) { |
| 2044 | if (fd.Is64Bits()) { |
| 2045 | // Convert float to double. |
| 2046 | DCHECK(fn.Is32Bits()); |
| 2047 | FPDataProcessing1Source(fd, fn, FCVT_ds); |
| 2048 | } else { |
| 2049 | // Convert double to float. |
| 2050 | DCHECK(fn.Is64Bits()); |
| 2051 | FPDataProcessing1Source(fd, fn, FCVT_sd); |
| 2052 | } |
| 2053 | } |
| 2054 | |
| 2055 | |
| 2056 | void Assembler::fcvtau(const Register& rd, const FPRegister& fn) { |
| 2057 | FPConvertToInt(rd, fn, FCVTAU); |
| 2058 | } |
| 2059 | |
| 2060 | |
| 2061 | void Assembler::fcvtas(const Register& rd, const FPRegister& fn) { |
| 2062 | FPConvertToInt(rd, fn, FCVTAS); |
| 2063 | } |
| 2064 | |
| 2065 | |
| 2066 | void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) { |
| 2067 | FPConvertToInt(rd, fn, FCVTMU); |
| 2068 | } |
| 2069 | |
| 2070 | |
| 2071 | void Assembler::fcvtms(const Register& rd, const FPRegister& fn) { |
| 2072 | FPConvertToInt(rd, fn, FCVTMS); |
| 2073 | } |
| 2074 | |
| 2075 | |
| 2076 | void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) { |
| 2077 | FPConvertToInt(rd, fn, FCVTNU); |
| 2078 | } |
| 2079 | |
| 2080 | |
| 2081 | void Assembler::fcvtns(const Register& rd, const FPRegister& fn) { |
| 2082 | FPConvertToInt(rd, fn, FCVTNS); |
| 2083 | } |
| 2084 | |
| 2085 | |
| 2086 | void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) { |
| 2087 | FPConvertToInt(rd, fn, FCVTZU); |
| 2088 | } |
| 2089 | |
| 2090 | |
| 2091 | void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) { |
| 2092 | FPConvertToInt(rd, fn, FCVTZS); |
| 2093 | } |
| 2094 | |
| 2095 | |
| 2096 | void Assembler::scvtf(const FPRegister& fd, |
| 2097 | const Register& rn, |
| 2098 | unsigned fbits) { |
| 2099 | if (fbits == 0) { |
| 2100 | Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd)); |
| 2101 | } else { |
| 2102 | Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | |
| 2103 | Rd(fd)); |
| 2104 | } |
| 2105 | } |
| 2106 | |
| 2107 | |
| 2108 | void Assembler::ucvtf(const FPRegister& fd, |
| 2109 | const Register& rn, |
| 2110 | unsigned fbits) { |
| 2111 | if (fbits == 0) { |
| 2112 | Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd)); |
| 2113 | } else { |
| 2114 | Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | |
| 2115 | Rd(fd)); |
| 2116 | } |
| 2117 | } |
| 2118 | |
| 2119 | |
| 2120 | void Assembler::dcptr(Label* label) {
| 2121 | RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); |
| 2122 | if (label->is_bound()) { |
| 2123 | // The label is bound, so it does not need to be updated and the internal |
| 2124 | // reference should be emitted. |
| 2125 | // |
| 2126 | // In this case, label->pos() returns the offset of the label from the |
| 2127 | // start of the buffer. |
| 2128 | internal_reference_positions_.push_back(pc_offset()); |
| 2129 | dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos())); |
| 2130 | } else { |
| 2131 | int32_t offset; |
| 2132 | if (label->is_linked()) { |
| 2133 | // The label is linked, so the internal reference should be added |
| 2134 | // onto the end of the label's link chain. |
| 2135 | // |
| 2136 | // In this case, label->pos() returns the offset of the last linked |
| 2137 | // instruction from the start of the buffer. |
| 2138 | offset = label->pos() - pc_offset(); |
| 2139 | DCHECK(offset != kStartOfLabelLinkChain); |
| 2140 | } else { |
| 2141 | // The label is unused, so it now becomes linked and the internal |
| 2142 | // reference is at the start of the new link chain. |
| 2143 | offset = kStartOfLabelLinkChain; |
| 2144 | } |
| 2145 | // The instruction at pc is now the last link in the label's chain. |
| 2146 | label->link_to(pc_offset()); |
| 2147 | |
| 2148 | // Traditionally the offset to the previous instruction in the chain is
| 2149 | // encoded in the instruction payload (e.g. branch range), but internal
| 2150 | // references are not instructions, so while unbound they are encoded as
| 2151 | // two consecutive brk instructions. The two 16-bit immediates are used
| 2152 | // to encode the offset.
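// For example (illustrative): a link offset of -32 bytes is shifted to -8
// instructions below, i.e. 0xfffffff8 as an int32, and is then emitted as
// brk(0xffff) followed by brk(0xfff8).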
| 2153 | offset >>= kInstructionSizeLog2; |
| 2154 | DCHECK(is_int32(offset)); |
| 2155 | uint32_t high16 = unsigned_bitextract_32(31, 16, offset); |
| 2156 | uint32_t low16 = unsigned_bitextract_32(15, 0, offset); |
| 2157 | |
| 2158 | brk(high16); |
| 2159 | brk(low16); |
| 2160 | } |
| 2161 | } |
| 2162 | |
| 2163 | |
| 2164 | // Note:
| 2165 | // Below, a difference in case for the same letter indicates a |
| 2166 | // negated bit. |
| 2167 | // If b is 1, then B is 0. |
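// As a worked example (added for clarity, assuming the standard ARMv8 FP8
// immediate format): 1.0f has raw bits 0x3f800000 and packs into the 8-bit
// ImmFP field as 0x70, while 2.0f (0x40000000) packs as 0x00.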
| 2168 | Instr Assembler::ImmFP32(float imm) { |
| 2169 | DCHECK(IsImmFP32(imm)); |
| 2170 | // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 |
| 2171 | uint32_t bits = float_to_rawbits(imm); |
| 2172 | // bit7: a000.0000 |
| 2173 | uint32_t bit7 = ((bits >> 31) & 0x1) << 7; |
| 2174 | // bit6: 0b00.0000 |
| 2175 | uint32_t bit6 = ((bits >> 29) & 0x1) << 6; |
| 2176 | // bit5_to_0: 00cd.efgh |
| 2177 | uint32_t bit5_to_0 = (bits >> 19) & 0x3f; |
| 2178 | |
| 2179 | return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; |
| 2180 | } |
| 2181 | |
| 2182 | |
| 2183 | Instr Assembler::ImmFP64(double imm) { |
| 2184 | DCHECK(IsImmFP64(imm)); |
| 2185 | // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 |
| 2186 | // 0000.0000.0000.0000.0000.0000.0000.0000 |
| 2187 | uint64_t bits = double_to_rawbits(imm); |
| 2188 | // bit7: a000.0000 |
| 2189 | uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
| 2190 | // bit6: 0b00.0000
| 2191 | uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
| 2192 | // bit5_to_0: 00cd.efgh
| 2193 | uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
| 2194 |
| 2195 | return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
| 2196 | }
| 2197 | |
| 2198 | |
| 2199 | // Code generation helpers. |
| 2200 | void Assembler::MoveWide(const Register& rd, |
| 2201 | uint64_t imm, |
| 2202 | int shift, |
| 2203 | MoveWideImmediateOp mov_op) { |
| 2204 | // Ignore the top 32 bits of an immediate if we're moving to a W register. |
| 2205 | if (rd.Is32Bits()) { |
| 2206 | // Check that the top 32 bits are zero (a positive 32-bit number) or top |
| 2207 | // 33 bits are one (a negative 32-bit number, sign extended to 64 bits). |
| 2208 | DCHECK(((imm >> kWRegSizeInBits) == 0) || |
| 2209 | ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff)); |
| 2210 | imm &= kWRegMask; |
| 2211 | } |
| 2212 | |
| 2213 | if (shift >= 0) { |
| 2214 | // Explicit shift specified. |
| 2215 | DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); |
| 2216 | DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16)); |
| 2217 | shift /= 16; |
| 2218 | } else { |
| 2219 | // Calculate a new immediate and shift combination to encode the immediate |
| 2220 | // argument. |
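// For example (illustrative): an immediate of 0xabcd0000 is encoded as
// imm16 = 0xabcd with shift = 1 (LSL #16).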
| 2221 | shift = 0; |
| 2222 | if ((imm & ~0xffffUL) == 0) { |
| 2223 | // Nothing to do. |
| 2224 | } else if ((imm & ~(0xffffUL << 16)) == 0) { |
| 2225 | imm >>= 16; |
| 2226 | shift = 1; |
| 2227 | } else if ((imm & ~(0xffffUL << 32)) == 0) { |
| 2228 | DCHECK(rd.Is64Bits()); |
| 2229 | imm >>= 32; |
| 2230 | shift = 2; |
| 2231 | } else if ((imm & ~(0xffffUL << 48)) == 0) { |
| 2232 | DCHECK(rd.Is64Bits()); |
| 2233 | imm >>= 48; |
| 2234 | shift = 3; |
| 2235 | } |
| 2236 | } |
| 2237 | |
| 2238 | DCHECK(is_uint16(imm)); |
| 2239 | |
| 2240 | Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
| 2241 | ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
| 2242 | }
| 2243 | |
| 2244 | |
| 2245 | void Assembler::AddSub(const Register& rd, |
| 2246 | const Register& rn, |
| 2247 | const Operand& operand, |
| 2248 | FlagsUpdate S, |
| 2249 | AddSubOp op) { |
| 2250 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 2251 | DCHECK(!operand.NeedsRelocation(this)); |
| 2252 | if (operand.IsImmediate()) { |
| 2253 | int64_t immediate = operand.ImmediateValue(); |
| 2254 | DCHECK(IsImmAddSub(immediate)); |
| 2255 | Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); |
| 2256 | Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | |
| 2257 | ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
| 2258 | } else if (operand.IsShiftedRegister()) {
| 2259 | DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); |
| 2260 | DCHECK(operand.shift() != ROR); |
| 2261 | |
| 2262 | // For instructions of the form: |
| 2263 | // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] |
| 2264 | // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ] |
| 2265 | // add/sub wsp, wsp, <Wm> [, LSL #0-3 ] |
| 2266 | // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ] |
| 2267 | // or their 64-bit register equivalents, convert the operand from shifted to |
| 2268 | // extended register mode, and emit an add/sub extended instruction. |
| 2269 | if (rn.IsSP() || rd.IsSP()) { |
| 2270 | DCHECK(!(rd.IsSP() && (S == SetFlags))); |
| 2271 | DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S, |
| 2272 | AddSubExtendedFixed | op); |
| 2273 | } else { |
| 2274 | DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); |
| 2275 | } |
| 2276 | } else { |
| 2277 | DCHECK(operand.IsExtendedRegister()); |
| 2278 | DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); |
| 2279 | } |
| 2280 | } |
| 2281 | |
| 2282 | |
| 2283 | void Assembler::AddSubWithCarry(const Register& rd, |
| 2284 | const Register& rn, |
| 2285 | const Operand& operand, |
| 2286 | FlagsUpdate S, |
| 2287 | AddSubWithCarryOp op) { |
| 2288 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 2289 | DCHECK(rd.SizeInBits() == operand.reg().SizeInBits()); |
| 2290 | DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); |
| 2291 | DCHECK(!operand.NeedsRelocation(this)); |
| 2292 | Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); |
| 2293 | } |
| 2294 | |
| 2295 | |
| 2296 | void Assembler::hlt(int code) { |
| 2297 | DCHECK(is_uint16(code)); |
| 2298 | Emit(HLT | ImmException(code)); |
| 2299 | } |
| 2300 | |
| 2301 | |
| 2302 | void Assembler::brk(int code) { |
| 2303 | DCHECK(is_uint16(code)); |
| 2304 | Emit(BRK | ImmException(code)); |
| 2305 | } |
| 2306 | |
| 2307 | |
| 2308 | void Assembler::EmitStringData(const char* string) { |
| 2309 | size_t len = strlen(string) + 1; |
| 2310 | DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap)); |
| 2311 | EmitData(string, static_cast<int>(len));
| 2312 | // Pad with NULL characters until pc_ is aligned.
| 2313 | const char pad[] = {'\0', '\0', '\0', '\0'}; |
| 2314 | STATIC_ASSERT(sizeof(pad) == kInstructionSize); |
| 2315 | EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset()); |
| 2316 | } |
| 2317 | |
| 2318 | |
| 2319 | void Assembler::debug(const char* message, uint32_t code, Instr params) { |
| 2320 | #ifdef USE_SIMULATOR |
| 2321 | // Don't generate simulator-specific code if we are building a snapshot, which
| 2322 | // might be run on real hardware. |
| 2323 | if (!serializer_enabled()) { |
| 2324 | // The arguments to the debug marker need to be contiguous in memory, so |
| 2325 | // make sure we don't try to emit pools. |
| 2326 | BlockPoolsScope scope(this); |
| 2327 | |
| 2328 | Label start; |
| 2329 | bind(&start); |
| 2330 | |
| 2331 | // Refer to instructions-arm64.h for a description of the marker and its |
| 2332 | // arguments. |
| 2333 | hlt(kImmExceptionIsDebug); |
| 2334 | DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); |
| 2335 | dc32(code); |
| 2336 | DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); |
| 2337 | dc32(params); |
| 2338 | DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset); |
| 2339 | EmitStringData(message); |
| 2340 | hlt(kImmExceptionIsUnreachable); |
| 2341 | |
| 2342 | return; |
| 2343 | } |
| 2344 | // Fall through if Serializer is enabled. |
| 2345 | #endif |
| 2346 | |
| 2347 | if (params & BREAK) { |
| 2348 | hlt(kImmExceptionIsDebug); |
| 2349 | } |
| 2350 | } |
| 2351 | |
| 2352 | |
| 2353 | void Assembler::Logical(const Register& rd, |
| 2354 | const Register& rn, |
| 2355 | const Operand& operand, |
| 2356 | LogicalOp op) { |
| 2357 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 2358 | DCHECK(!operand.NeedsRelocation(this)); |
| 2359 | if (operand.IsImmediate()) { |
| 2360 | int64_t immediate = operand.ImmediateValue(); |
| 2361 | unsigned reg_size = rd.SizeInBits(); |
| 2362 | |
| 2363 | DCHECK(immediate != 0); |
| 2364 | DCHECK(immediate != -1); |
| 2365 | DCHECK(rd.Is64Bits() || is_uint32(immediate)); |
| 2366 | |
| 2367 | // If the operation is NOT, invert the operation and immediate. |
| 2368 | if ((op & NOT) == NOT) { |
| 2369 | op = static_cast<LogicalOp>(op & ~NOT); |
| 2370 | immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); |
| 2371 | } |
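// For example (illustrative): orn(w0, w1, Operand(0xffff0000)) reaches this
// point as an ORR of w1 with the inverted immediate 0x0000ffff.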
| 2372 | |
| 2373 | unsigned n, imm_s, imm_r; |
| 2374 | if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { |
| 2375 | // Immediate can be encoded in the instruction. |
| 2376 | LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
| 2377 | } else { |
| 2378 | // This case is handled in the macro assembler. |
| 2379 | UNREACHABLE(); |
| 2380 | } |
| 2381 | } else { |
| 2382 | DCHECK(operand.IsShiftedRegister()); |
| 2383 | DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); |
| 2384 | Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed); |
| 2385 | DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); |
| 2386 | } |
| 2387 | } |
| 2388 | |
| 2389 | |
| 2390 | void Assembler::LogicalImmediate(const Register& rd, |
| 2391 | const Register& rn, |
| 2392 | unsigned n, |
| 2393 | unsigned imm_s, |
| 2394 | unsigned imm_r, |
| 2395 | LogicalOp op) { |
| 2396 | unsigned reg_size = rd.SizeInBits(); |
| 2397 | Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd); |
| 2398 | Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | |
| 2399 | ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | |
| 2400 | Rn(rn)); |
| 2401 | } |
| 2402 | |
| 2403 | |
| 2404 | void Assembler::ConditionalCompare(const Register& rn, |
| 2405 | const Operand& operand, |
| 2406 | StatusFlags nzcv, |
| 2407 | Condition cond, |
| 2408 | ConditionalCompareOp op) { |
| 2409 | Instr ccmpop; |
| 2410 | DCHECK(!operand.NeedsRelocation(this)); |
| 2411 | if (operand.IsImmediate()) { |
| 2412 | int64_t immediate = operand.ImmediateValue(); |
| 2413 | DCHECK(IsImmConditionalCompare(immediate)); |
| 2414 | ccmpop = ConditionalCompareImmediateFixed | op |
| 2415 | ImmCondCmp(static_cast<unsigned>(immediate));
| 2416 | } else {
| 2417 | DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); |
| 2418 | ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); |
| 2419 | } |
| 2420 | Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); |
| 2421 | } |
| 2422 | |
| 2423 | |
| 2424 | void Assembler::DataProcessing1Source(const Register& rd, |
| 2425 | const Register& rn, |
| 2426 | DataProcessing1SourceOp op) { |
| 2427 | DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| 2428 | Emit(SF(rn) | op | Rn(rn) | Rd(rd)); |
| 2429 | } |
| 2430 | |
| 2431 | |
| 2432 | void Assembler::FPDataProcessing1Source(const FPRegister& fd, |
| 2433 | const FPRegister& fn, |
| 2434 | FPDataProcessing1SourceOp op) { |
| 2435 | Emit(FPType(fn) | op | Rn(fn) | Rd(fd)); |
| 2436 | } |
| 2437 | |
| 2438 | |
| 2439 | void Assembler::FPDataProcessing2Source(const FPRegister& fd, |
| 2440 | const FPRegister& fn, |
| 2441 | const FPRegister& fm, |
| 2442 | FPDataProcessing2SourceOp op) { |
| 2443 | DCHECK(fd.SizeInBits() == fn.SizeInBits()); |
| 2444 | DCHECK(fd.SizeInBits() == fm.SizeInBits()); |
| 2445 | Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd)); |
| 2446 | } |
| 2447 | |
| 2448 | |
| 2449 | void Assembler::FPDataProcessing3Source(const FPRegister& fd, |
| 2450 | const FPRegister& fn, |
| 2451 | const FPRegister& fm, |
| 2452 | const FPRegister& fa, |
| 2453 | FPDataProcessing3SourceOp op) { |
| 2454 | DCHECK(AreSameSizeAndType(fd, fn, fm, fa)); |
| 2455 | Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa)); |
| 2456 | } |
| 2457 | |
| 2458 | |
| 2459 | void Assembler::EmitShift(const Register& rd, |
| 2460 | const Register& rn, |
| 2461 | Shift shift, |
| 2462 | unsigned shift_amount) { |
| 2463 | switch (shift) { |
| 2464 | case LSL: |
| 2465 | lsl(rd, rn, shift_amount); |
| 2466 | break; |
| 2467 | case LSR: |
| 2468 | lsr(rd, rn, shift_amount); |
| 2469 | break; |
| 2470 | case ASR: |
| 2471 | asr(rd, rn, shift_amount); |
| 2472 | break; |
| 2473 | case ROR: |
| 2474 | ror(rd, rn, shift_amount); |
| 2475 | break; |
| 2476 | default: |
| 2477 | UNREACHABLE(); |
| 2478 | } |
| 2479 | } |
| 2480 | |
| 2481 | |
| 2482 | void Assembler::EmitExtendShift(const Register& rd, |
| 2483 | const Register& rn, |
| 2484 | Extend extend, |
| 2485 | unsigned left_shift) { |
| 2486 | DCHECK(rd.SizeInBits() >= rn.SizeInBits()); |
| 2487 | unsigned reg_size = rd.SizeInBits(); |
| 2488 | // Use the correct size of register. |
| 2489 | Register rn_ = Register::Create(rn.code(), rd.SizeInBits()); |
| 2490 | // Bits extracted are high_bit:0. |
| 2491 | unsigned high_bit = (8 << (extend & 0x3)) - 1; |
| 2492 | // Number of bits left in the result that are not introduced by the shift. |
| 2493 | unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1); |
| 2494 | |
| 2495 | if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { |
| 2496 | switch (extend) { |
| 2497 | case UXTB: |
| 2498 | case UXTH: |
| 2499 | case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break; |
| 2500 | case SXTB: |
| 2501 | case SXTH: |
| 2502 | case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; |
| 2503 | case UXTX: |
| 2504 | case SXTX: { |
| 2505 | DCHECK(rn.SizeInBits() == kXRegSizeInBits); |
| 2506 | // Nothing to extend. Just shift. |
| 2507 | lsl(rd, rn_, left_shift); |
| 2508 | break; |
| 2509 | } |
| 2510 | default: UNREACHABLE(); |
| 2511 | } |
| 2512 | } else { |
| 2513 | // No need to extend as the extended bits would be shifted away. |
| 2514 | lsl(rd, rn_, left_shift); |
| 2515 | } |
| 2516 | } |
| 2517 | |
| 2518 | |
| 2519 | void Assembler::DataProcShiftedRegister(const Register& rd, |
| 2520 | const Register& rn, |
| 2521 | const Operand& operand, |
| 2522 | FlagsUpdate S, |
| 2523 | Instr op) { |
| 2524 | DCHECK(operand.IsShiftedRegister()); |
| 2525 | DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount()))); |
| 2526 | DCHECK(!operand.NeedsRelocation(this)); |
| 2527 | Emit(SF(rd) | op | Flags(S) | |
| 2528 | ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) | |
| 2529 | Rm(operand.reg()) | Rn(rn) | Rd(rd)); |
| 2530 | } |
| 2531 | |
| 2532 | |
| 2533 | void Assembler::DataProcExtendedRegister(const Register& rd, |
| 2534 | const Register& rn, |
| 2535 | const Operand& operand, |
| 2536 | FlagsUpdate S, |
| 2537 | Instr op) { |
| 2538 | DCHECK(!operand.NeedsRelocation(this)); |
| 2539 | Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); |
| 2540 | Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | |
| 2541 | ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) | |
| 2542 | dest_reg | RnSP(rn)); |
| 2543 | } |
| 2544 | |
| 2545 | |
| 2546 | bool Assembler::IsImmAddSub(int64_t immediate) { |
| 2547 | return is_uint12(immediate) || |
| 2548 | (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0)); |
| 2549 | } |
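// For example (illustrative): 0x123 and 0x123000 are encodable add/sub
// immediates, while 0x123456 is not, since it neither fits in 12 bits nor
// has its low 12 bits clear.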
| 2550 | |
| 2551 | void Assembler::LoadStore(const CPURegister& rt, |
| 2552 | const MemOperand& addr, |
| 2553 | LoadStoreOp op) { |
| 2554 | Instr memop = op | Rt(rt) | RnSP(addr.base()); |
| 2555 |
| 2556 | if (addr.IsImmediateOffset()) { |
| 2557 | LSDataSize size = CalcLSDataSize(op); |
| 2558 | if (IsImmLSScaled(addr.offset(), size)) {
| 2559 | int offset = static_cast<int>(addr.offset());
| 2560 | // Use the scaled addressing mode.
| 2561 | Emit(LoadStoreUnsignedOffsetFixed | memop |
| 2562 | ImmLSUnsigned(offset >> size));
| 2563 | } else if (IsImmLSUnscaled(addr.offset())) {
| 2564 | int offset = static_cast<int>(addr.offset());
| 2565 | // Use the unscaled addressing mode.
| 2566 | Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset)); |
| 2567 | } else { |
| 2568 | // This case is handled in the macro assembler. |
| 2569 | UNREACHABLE(); |
| 2570 | } |
| 2571 | } else if (addr.IsRegisterOffset()) { |
| 2572 | Extend ext = addr.extend(); |
| 2573 | Shift shift = addr.shift(); |
| 2574 | unsigned shift_amount = addr.shift_amount(); |
| 2575 | |
| 2576 | // LSL is encoded in the option field as UXTX. |
| 2577 | if (shift == LSL) { |
| 2578 | ext = UXTX; |
| 2579 | } |
| 2580 | |
| 2581 | // Shifts are encoded in one bit, indicating a left shift by the memory |
| 2582 | // access size. |
| 2583 | DCHECK((shift_amount == 0) || |
| 2584 | (shift_amount == static_cast<unsigned>(CalcLSDataSize(op)))); |
| 2585 | Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) | |
| 2586 | ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0)); |
| 2587 | } else { |
| 2588 | // Pre-index and post-index modes. |
| 2589 | DCHECK(!rt.Is(addr.base())); |
| 2590 | if (IsImmLSUnscaled(addr.offset())) {
| 2591 | int offset = static_cast<int>(addr.offset());
| 2592 | if (addr.IsPreIndex()) {
| 2593 | Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); |
| 2594 | } else { |
| 2595 | DCHECK(addr.IsPostIndex()); |
| 2596 | Emit(LoadStorePostIndexFixed | memop | ImmLS(offset)); |
| 2597 | } |
| 2598 | } else { |
| 2599 | // This case is handled in the macro assembler. |
| 2600 | UNREACHABLE(); |
| 2601 | } |
| 2602 | } |
| 2603 | } |
| 2604 | |
| 2605 | |
| 2606 | bool Assembler::IsImmLSUnscaled(int64_t offset) { |
| 2607 | return is_int9(offset); |
| 2608 | } |
| 2609 | |
| 2610 | |
| 2611 | bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) { |
| 2612 | bool offset_is_size_multiple = (((offset >> size) << size) == offset); |
| 2613 | return offset_is_size_multiple && is_uint12(offset >> size); |
| 2614 | } |
| 2615 | |
| 2616 | |
| 2617 | bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) { |
| 2618 | bool offset_is_size_multiple = (((offset >> size) << size) == offset); |
| 2619 | return offset_is_size_multiple && is_int7(offset >> size); |
| 2620 | } |
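// For example (illustrative), for a 64-bit access (size == 3): scaled offsets
// must be multiples of 8 in [0, 32760], unscaled offsets must fit in the
// signed 9-bit range [-256, 255], and pair offsets must be multiples of 8 in
// [-512, 504].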
| 2621 | |
| 2622 | |
| 2623 | bool Assembler::IsImmLLiteral(int64_t offset) {
| 2624 | int inst_size = static_cast<int>(kInstructionSizeLog2); |
| 2625 | bool offset_is_inst_multiple = |
| 2626 | (((offset >> inst_size) << inst_size) == offset); |
| 2627 | return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width); |
| 2628 | } |
| 2629 | |
| 2630 | |
| 2631 | // Test if a given value can be encoded in the immediate field of a logical
| 2632 | // instruction. |
| 2633 | // If it can be encoded, the function returns true, and values pointed to by n, |
| 2634 | // imm_s and imm_r are updated with immediates encoded in the format required |
| 2635 | // by the corresponding fields in the logical instruction. |
| 2636 | // If it cannot be encoded, the function returns false, and the values pointed
| 2637 | // to by n, imm_s and imm_r are undefined. |
| 2638 | bool Assembler::IsImmLogical(uint64_t value, |
| 2639 | unsigned width, |
| 2640 | unsigned* n, |
| 2641 | unsigned* imm_s, |
| 2642 | unsigned* imm_r) { |
| 2643 | DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); |
| 2644 | DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); |
| 2645 | |
| 2646 | bool negate = false; |
| 2647 | |
| 2648 | // Logical immediates are encoded using parameters n, imm_s and imm_r using |
| 2649 | // the following table: |
| 2650 | // |
| 2651 | // N imms immr size S R |
| 2652 | // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) |
| 2653 | // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) |
| 2654 | // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) |
| 2655 | // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) |
| 2656 | // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) |
| 2657 | // 0 11110s xxxxxr 2 UInt(s) UInt(r) |
| 2658 | // (s bits must not be all set) |
| 2659 | // |
| 2660 | // A pattern is constructed of size bits, where the least significant S+1 bits |
| 2661 | // are set. The pattern is rotated right by R, and repeated across a 32 or |
| 2662 | // 64-bit value, depending on destination register width. |
| 2663 | // |
| 2664 | // Put another way: the basic format of a logical immediate is a single |
| 2665 | // contiguous stretch of 1 bits, repeated across the whole word at intervals |
| 2666 | // given by a power of 2. To identify them quickly, we first locate the |
| 2667 | // lowest stretch of 1 bits, then the next 1 bit above that; that combination |
| 2668 | // is different for every logical immediate, so it gives us all the |
| 2669 | // information we need to identify the only logical immediate that our input |
| 2670 | // could be, and then we simply check if that's the value we actually have. |
| 2671 | // |
| 2672 | // (The rotation parameter does give the possibility of the stretch of 1 bits |
| 2673 | // going 'round the end' of the word. To deal with that, we observe that in |
| 2674 | // any situation where that happens the bitwise NOT of the value is also a |
| 2675 | // valid logical immediate. So we simply invert the input whenever its low bit |
| 2676 | // is set, and then we know that the rotated case can't arise.) |
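// As a worked example (added for clarity): for value == 0x0000ffff with
// width == 32, the repeating element is the whole 32-bit word with its low
// 16 bits set, so the encoding is n = 0, imm_s = 0b001111 (16 bits set) and
// imm_r = 0 (no rotation).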
| 2677 | |
| 2678 | if (value & 1) { |
| 2679 | // If the low bit is 1, negate the value, and set a flag to remember that we |
| 2680 | // did (so that we can adjust the return values appropriately). |
| 2681 | negate = true; |
| 2682 | value = ~value; |
| 2683 | } |
| 2684 | |
| 2685 | if (width == kWRegSizeInBits) { |
| 2686 | // To handle 32-bit logical immediates, the very easiest thing is to repeat |
| 2687 | // the input value twice to make a 64-bit word. The correct encoding of that |
| 2688 | // as a logical immediate will also be the correct encoding of the 32-bit |
| 2689 | // value. |
| 2690 | |
| 2691 | // The most-significant 32 bits may not be zero (i.e. negate is true), so
| 2692 | // shift the value left before duplicating it. |
| 2693 | value <<= kWRegSizeInBits; |
| 2694 | value |= value >> kWRegSizeInBits; |
| 2695 | } |
| 2696 | |
| 2697 | // The basic analysis idea: imagine our input word looks like this. |
| 2698 | // |
| 2699 | // 0011111000111110001111100011111000111110001111100011111000111110 |
| 2700 | // c b a |
| 2701 | // |<--d-->| |
| 2702 | // |
| 2703 | // We find the lowest set bit (as an actual power-of-2 value, not its index) |
| 2704 | // and call it a. Then we add a to our original number, which wipes out the |
| 2705 | // bottommost stretch of set bits and replaces it with a 1 carried into the |
| 2706 | // next zero bit. Then we look for the new lowest set bit, which is in |
| 2707 | // position b, and subtract it, so now our number is just like the original |
| 2708 | // but with the lowest stretch of set bits completely gone. Now we find the |
| 2709 | // lowest set bit again, which is position c in the diagram above. Then we'll |
| 2710 | // measure the distance d between bit positions a and c (using CLZ), and that |
| 2711 | // tells us that the only valid logical immediate that could possibly be equal |
| 2712 | // to this number is the one in which a stretch of bits running from a to just |
| 2713 | // below b is replicated every d bits. |
| 2714 | uint64_t a = LargestPowerOf2Divisor(value); |
| 2715 | uint64_t value_plus_a = value + a; |
| 2716 | uint64_t b = LargestPowerOf2Divisor(value_plus_a); |
| 2717 | uint64_t value_plus_a_minus_b = value_plus_a - b; |
| 2718 | uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b); |
| 2719 | |
| 2720 | int d, clz_a, out_n; |
| 2721 | uint64_t mask; |
| 2722 | |
| 2723 | if (c != 0) { |
| 2724 | // The general case, in which there is more than one stretch of set bits. |
| 2725 | // Compute the repeat distance d, and set up a bitmask covering the basic |
| 2726 | // unit of repetition (i.e. a word with the bottom d bits set). Also, in all |
| 2727 | // of these cases the N bit of the output will be zero. |
| 2728 | clz_a = CountLeadingZeros(a, kXRegSizeInBits); |
| 2729 | int clz_c = CountLeadingZeros(c, kXRegSizeInBits); |
| 2730 | d = clz_a - clz_c; |
| 2731 | mask = ((V8_UINT64_C(1) << d) - 1); |
| 2732 | out_n = 0; |
| 2733 | } else { |
| 2734 | // Handle degenerate cases. |
| 2735 | // |
| 2736 | // If any of those 'find lowest set bit' operations didn't find a set bit at |
| 2737 | // all, then the word will have been zero thereafter, so in particular the |
| 2738 | // last lowest_set_bit operation will have returned zero. So we can test for |
| 2739 | // all the special case conditions in one go by seeing if c is zero. |
| 2740 | if (a == 0) { |
| 2741 | // The input was zero (or all 1 bits, which also ends up here after we
| 2742 | // inverted it at the start of the function), for which we just return
| 2743 | // false.
| 2744 | return false; |
| 2745 | } else { |
| 2746 | // Otherwise, if c was zero but a was not, then there's just one stretch |
| 2747 | // of set bits in our word, meaning that we have the trivial case of |
| 2748 | // d == 64 and only one 'repetition'. Set up all the same variables as in |
| 2749 | // the general case above, and set the N bit in the output. |
| 2750 | clz_a = CountLeadingZeros(a, kXRegSizeInBits); |
| 2751 | d = 64; |
| 2752 | mask = ~V8_UINT64_C(0); |
| 2753 | out_n = 1; |
| 2754 | } |
| 2755 | } |
| 2756 | |
| 2757 | // If the repeat period d is not a power of two, it can't be encoded. |
| 2758 | if (!IS_POWER_OF_TWO(d)) { |
| 2759 | return false; |
| 2760 | } |
| 2761 | |
| 2762 | if (((b - a) & ~mask) != 0) { |
| 2763 | // If the bit stretch (b - a) does not fit within the mask derived from the |
| 2764 | // repeat period, then fail. |
| 2765 | return false; |
| 2766 | } |
| 2767 | |
| 2768 | // The only possible option is b - a repeated every d bits. Now we're going to |
| 2769 | // actually construct the valid logical immediate derived from that |
| 2770 | // specification, and see if it equals our original input. |
| 2771 | // |
| 2772 | // To repeat a value every d bits, we multiply it by a number of the form |
| 2773 | // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can |
| 2774 | // be derived using a table lookup on CLZ(d). |
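| | // For example, d == 8 gives CLZ(d) == 60 and multiplier_idx == 3, i.e. |
| | // 0x0101010101010101; multiplying the basic unit 0x3e from the walkthrough |
| | // above by it replicates the stretch into every byte: 0x3e3e3e3e3e3e3e3e. |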
| 2775 | static const uint64_t multipliers[] = { |
| 2776 | 0x0000000000000001UL, |
| 2777 | 0x0000000100000001UL, |
| 2778 | 0x0001000100010001UL, |
| 2779 | 0x0101010101010101UL, |
| 2780 | 0x1111111111111111UL, |
| 2781 | 0x5555555555555555UL, |
| 2782 | }; |
| 2783 | int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; |
| 2784 | // Ensure that the index to the multipliers array is within bounds. |
| 2785 | DCHECK((multiplier_idx >= 0) && |
| 2786 | (static_cast<size_t>(multiplier_idx) < arraysize(multipliers))); |
| 2787 | uint64_t multiplier = multipliers[multiplier_idx]; |
| 2788 | uint64_t candidate = (b - a) * multiplier; |
| 2789 | |
| 2790 | if (value != candidate) { |
| 2791 | // The candidate pattern doesn't match our input value, so fail. |
| 2792 | return false; |
| 2793 | } |
| 2794 | |
| 2795 | // We have a match! This is a valid logical immediate, so now we have to |
| 2796 | // construct the bits and pieces of the instruction encoding that generates |
| 2797 | // it. |
| 2798 | |
| 2799 | // Count the set bits in our basic stretch. The special case of clz(0) == -1 |
| 2800 | // makes the answer come out right for stretches that reach the very top of |
| 2801 | // the word (e.g. numbers like 0xffffc00000000000). |
| 2802 | int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits); |
| 2803 | int s = clz_a - clz_b; |
| 2804 | |
| 2805 | // Decide how many bits to rotate right by, to put the low bit of that basic |
| 2806 | // stretch in position a. |
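| |   // E.g. in the non-negated case, a stretch whose lowest set bit is bit 1 |
| |   // (clz_a == 62) with d == 8 gives r == (62 + 1) & 7 == 7. |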
| 2807 | int r; |
| 2808 | if (negate) { |
| 2809 | // If we inverted the input right at the start of this function, here's |
| 2810 | // where we compensate: the number of set bits becomes the number of clear |
| 2811 | // bits, and the rotation count is based on position b rather than position |
| 2812 | // a (since b is the location of the 'lowest' 1 bit after inversion). |
| 2813 | s = d - s; |
| 2814 | r = (clz_b + 1) & (d - 1); |
| 2815 | } else { |
| 2816 | r = (clz_a + 1) & (d - 1); |
| 2817 | } |
| 2818 | |
| 2819 | // Now we're done, except for having to encode the S output in such a way that |
| 2820 | // it gives both the number of set bits and the length of the repeated |
| 2821 | // segment. The s field is encoded like this: |
| 2822 | // |
| 2823 | // imms size S |
| 2824 | // ssssss 64 UInt(ssssss) |
| 2825 | // 0sssss 32 UInt(sssss) |
| 2826 | // 10ssss 16 UInt(ssss) |
| 2827 | // 110sss 8 UInt(sss) |
| 2828 | // 1110ss 4 UInt(ss) |
| 2829 | // 11110s 2 UInt(s) |
| 2830 | // |
| 2831 | // So we 'or' (-d << 1) with our computed (s - 1) to form imms. |
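| | // For example, with d == 8 and s == 5 set bits, (-8 << 1) & 0x3f == 0b110000 |
| | // and (s - 1) == 0b000100, so imms == 0b110100: the "110sss" form with S == 4. |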
| 2832 | *n = out_n; |
| 2833 | *imm_s = ((-d << 1) | (s - 1)) & 0x3f; |
| 2834 | *imm_r = r; |
| 2835 | |
| 2836 | return true; |
| 2837 | } |
| 2838 | |
| 2839 | |
| 2840 | bool Assembler::IsImmConditionalCompare(int64_t immediate) { |
| 2841 | return is_uint5(immediate); |
| 2842 | } |
| 2843 | |
| 2844 | |
| 2845 | bool Assembler::IsImmFP32(float imm) { |
| 2846 | // Valid values will have the form: |
| 2847 | // aBbb.bbbc.defg.h000.0000.0000.0000.0000 |
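| |   // For example, 1.0f (0x3f800000) and 0.5f (0x3f000000) have this form, |
| |   // while 0.1f (0x3dcccccd) does not: its low mantissa bits are set. |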
| 2848 | uint32_t bits = float_to_rawbits(imm); |
| 2849 |   // bits[18..0] are cleared. |
| 2850 | if ((bits & 0x7ffff) != 0) { |
| 2851 | return false; |
| 2852 | } |
| 2853 | |
| 2854 | // bits[29..25] are all set or all cleared. |
| 2855 | uint32_t b_pattern = (bits >> 16) & 0x3e00; |
| 2856 | if (b_pattern != 0 && b_pattern != 0x3e00) { |
| 2857 | return false; |
| 2858 | } |
| 2859 | |
| 2860 | // bit[30] and bit[29] are opposite. |
| 2861 | if (((bits ^ (bits << 1)) & 0x40000000) == 0) { |
| 2862 | return false; |
| 2863 | } |
| 2864 | |
| 2865 | return true; |
| 2866 | } |
| 2867 | |
| 2868 | |
| 2869 | bool Assembler::IsImmFP64(double imm) { |
| 2870 | // Valid values will have the form: |
| 2871 | // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 |
| 2872 | // 0000.0000.0000.0000.0000.0000.0000.0000 |
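| |   // For example, 1.0 (0x3ff0000000000000) has this form, while 0.1 |
| |   // (0x3fb999999999999a) does not: its low mantissa bits are set. |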
| 2873 | uint64_t bits = double_to_rawbits(imm); |
| 2874 | // bits[47..0] are cleared. |
| 2875 | if ((bits & 0xffffffffffffL) != 0) { |
| 2876 | return false; |
| 2877 | } |
| 2878 | |
| 2879 | // bits[61..54] are all set or all cleared. |
| 2880 | uint32_t b_pattern = (bits >> 48) & 0x3fc0; |
| 2881 | if (b_pattern != 0 && b_pattern != 0x3fc0) { |
| 2882 | return false; |
| 2883 | } |
| 2884 | |
| 2885 | // bit[62] and bit[61] are opposite. |
| 2886 | if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) { |
| 2887 | return false; |
| 2888 | } |
| 2889 | |
| 2890 | return true; |
| 2891 | } |
| 2892 | |
| 2893 | |
| 2894 | void Assembler::GrowBuffer() { |
| 2895 | if (!own_buffer_) FATAL("external code buffer is too small"); |
| 2896 | |
| 2897 | // Compute new buffer size. |
| 2898 | CodeDesc desc; // the new buffer |
| 2899 | if (buffer_size_ < 1 * MB) { |
| 2900 | desc.buffer_size = 2 * buffer_size_; |
| 2901 | } else { |
| 2902 | desc.buffer_size = buffer_size_ + 1 * MB; |
| 2903 | } |
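| |   // For example, a 256KB buffer doubles to 512KB; a 4MB buffer grows to 5MB. |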
| 2904 | CHECK_GT(desc.buffer_size, 0); // No overflow. |
| 2905 | |
| 2906 | byte* buffer = reinterpret_cast<byte*>(buffer_); |
| 2907 | |
| 2908 | // Set up new buffer. |
| 2909 | desc.buffer = NewArray<byte>(desc.buffer_size); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2910 | desc.origin = this; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2911 | |
| 2912 | desc.instr_size = pc_offset(); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2913 | desc.reloc_size = |
| 2914 | static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos()); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2915 | |
| 2916 | // Copy the data. |
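| |   // pc_delta is how far the instruction area moves; rc_delta is how far the |
| |   // relocation info (which grows down from the end of the buffer) moves. |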
| 2917 | intptr_t pc_delta = desc.buffer - buffer; |
| 2918 | intptr_t rc_delta = (desc.buffer + desc.buffer_size) - |
| 2919 | (buffer + buffer_size_); |
| 2920 | memmove(desc.buffer, buffer, desc.instr_size); |
| 2921 | memmove(reloc_info_writer.pos() + rc_delta, |
| 2922 | reloc_info_writer.pos(), desc.reloc_size); |
| 2923 | |
| 2924 | // Switch buffers. |
| 2925 | DeleteArray(buffer_); |
| 2926 | buffer_ = desc.buffer; |
| 2927 | buffer_size_ = desc.buffer_size; |
| 2928 | pc_ = reinterpret_cast<byte*>(pc_) + pc_delta; |
| 2929 | reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
| 2930 | reloc_info_writer.last_pc() + pc_delta); |
| 2931 | |
| 2932 |   // None of our relocation types are pc-relative pointing outside the code |
| 2933 |   // buffer, nor pc-absolute pointing inside the code buffer, so there is no |
| 2934 |   // need to relocate any emitted relocation entries. |
| 2935 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2936 | // Relocate internal references. |
| 2937 | for (auto pos : internal_reference_positions_) { |
| 2938 | intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos); |
| 2939 | *p += pc_delta; |
| 2940 | } |
| 2941 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2942 | // Pending relocation entries are also relative, no need to relocate. |
| 2943 | } |
| 2944 | |
| 2945 | |
| 2946 | void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2947 | // We do not try to reuse pool constants. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2948 | RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL); |
| 2949 | if (((rmode >= RelocInfo::COMMENT) && |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2950 | (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2951 | (rmode == RelocInfo::INTERNAL_REFERENCE) || |
| 2952 | (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) || |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame] | 2953 | (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2954 | (rmode == RelocInfo::GENERATOR_CONTINUATION)) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2955 | // Adjust code for new modes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2956 | DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) || |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame] | 2957 | RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) || |
| 2958 | RelocInfo::IsPosition(rmode) || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2959 | RelocInfo::IsInternalReference(rmode) || |
| 2960 | RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) || |
| 2961 | RelocInfo::IsGeneratorContinuation(rmode)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2962 | // These modes do not need an entry in the constant pool. |
| 2963 | } else { |
| 2964 | constpool_.RecordEntry(data, rmode); |
| 2965 | // Make sure the constant pool is not emitted in place of the next |
| 2966 | // instruction for which we just recorded relocation info. |
| 2967 | BlockConstPoolFor(1); |
| 2968 | } |
| 2969 | |
| 2970 | if (!RelocInfo::IsNone(rmode)) { |
| 2971 | // Don't record external references unless the heap will be serialized. |
| 2972 | if (rmode == RelocInfo::EXTERNAL_REFERENCE && |
| 2973 | !serializer_enabled() && !emit_debug_code()) { |
| 2974 | return; |
| 2975 | } |
| 2976 | DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
| 2977 | if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2978 | RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_), |
| 2979 | rmode, RecordedAstId().ToInt(), NULL); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2980 | ClearRecordedAstId(); |
| 2981 | reloc_info_writer.Write(&reloc_info_with_ast_id); |
| 2982 | } else { |
| 2983 | reloc_info_writer.Write(&rinfo); |
| 2984 | } |
| 2985 | } |
| 2986 | } |
| 2987 | |
| 2988 | |
| 2989 | void Assembler::BlockConstPoolFor(int instructions) { |
| 2990 | int pc_limit = pc_offset() + instructions * kInstructionSize; |
| 2991 | if (no_const_pool_before_ < pc_limit) { |
| 2992 | no_const_pool_before_ = pc_limit; |
| 2993 | // Make sure the pool won't be blocked for too long. |
| 2994 | DCHECK(pc_limit < constpool_.MaxPcOffset()); |
| 2995 | } |
| 2996 | |
| 2997 | if (next_constant_pool_check_ < no_const_pool_before_) { |
| 2998 | next_constant_pool_check_ = no_const_pool_before_; |
| 2999 | } |
| 3000 | } |
| 3001 | |
| 3002 | |
| 3003 | void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| 3004 |   // Some short sequences of instructions must not be broken up by constant |
| 3005 |   // pool emission; such sequences are protected by calls to BlockConstPoolFor |
| 3006 |   // and BlockConstPoolScope. |
| 3007 | if (is_const_pool_blocked()) { |
| 3008 | // Something is wrong if emission is forced and blocked at the same time. |
| 3009 | DCHECK(!force_emit); |
| 3010 | return; |
| 3011 | } |
| 3012 | |
| 3013 | // There is nothing to do if there are no pending constant pool entries. |
| 3014 | if (constpool_.IsEmpty()) { |
| 3015 | // Calculate the offset of the next check. |
| 3016 | SetNextConstPoolCheckIn(kCheckConstPoolInterval); |
| 3017 | return; |
| 3018 | } |
| 3019 | |
| 3020 | // We emit a constant pool when: |
| 3021 | // * requested to do so by parameter force_emit (e.g. after each function). |
| 3022 | // * the distance to the first instruction accessing the constant pool is |
| 3023 | // kApproxMaxDistToConstPool or more. |
| 3024 | // * the number of entries in the pool is kApproxMaxPoolEntryCount or more. |
| 3025 | int dist = constpool_.DistanceToFirstUse(); |
| 3026 | int count = constpool_.EntryCount(); |
| 3027 | if (!force_emit && |
| 3028 | (dist < kApproxMaxDistToConstPool) && |
| 3029 | (count < kApproxMaxPoolEntryCount)) { |
| 3030 | return; |
| 3031 | } |
| 3032 | |
| 3034 | // Emit veneers for branches that would go out of range during emission of the |
| 3035 | // constant pool. |
| 3036 | int worst_case_size = constpool_.WorstCaseSize(); |
| 3037 | CheckVeneerPool(false, require_jump, |
| 3038 | kVeneerDistanceMargin + worst_case_size); |
| 3039 | |
| 3040 | // Check that the code buffer is large enough before emitting the constant |
| 3041 | // pool (this includes the gap to the relocation information). |
| 3042 | int needed_space = worst_case_size + kGap + 1 * kInstructionSize; |
| 3043 | while (buffer_space() <= needed_space) { |
| 3044 | GrowBuffer(); |
| 3045 | } |
| 3046 | |
| 3047 | Label size_check; |
| 3048 | bind(&size_check); |
| 3049 | constpool_.Emit(require_jump); |
| 3050 | DCHECK(SizeOfCodeGeneratedSince(&size_check) <= |
| 3051 | static_cast<unsigned>(worst_case_size)); |
| 3052 | |
| 3053 | // Since a constant pool was just emitted, move the check offset forward by |
| 3054 | // the standard interval. |
| 3055 | SetNextConstPoolCheckIn(kCheckConstPoolInterval); |
| 3056 | } |
| 3057 | |
| 3058 | |
| 3059 | bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { |
| 3060 | // Account for the branch around the veneers and the guard. |
| 3061 | int protection_offset = 2 * kInstructionSize; |
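| |   // Also assume, conservatively, that every unresolved branch may need its |
| |   // own veneer (kMaxVeneerCodeSize each) in the same pool. |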
| 3062 | return pc_offset() > max_reachable_pc - margin - protection_offset - |
| 3063 | static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); |
| 3064 | } |
| 3065 | |
| 3066 | |
| 3067 | void Assembler::RecordVeneerPool(int location_offset, int size) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3068 | RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL, |
| 3069 | static_cast<intptr_t>(size), NULL); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3070 | reloc_info_writer.Write(&rinfo); |
| 3071 | } |
| 3072 | |
| 3073 | |
| 3074 | void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) { |
| 3075 | BlockPoolsScope scope(this); |
| 3076 | RecordComment("[ Veneers"); |
| 3077 | |
| 3078 | // The exact size of the veneer pool must be recorded (see the comment at the |
| 3079 | // declaration site of RecordConstPool()), but computing the number of |
| 3080 | // veneers that will be generated is not obvious. So instead we remember the |
| 3081 | // current position and will record the size after the pool has been |
| 3082 | // generated. |
| 3083 | Label size_check; |
| 3084 | bind(&size_check); |
| 3085 | int veneer_pool_relocinfo_loc = pc_offset(); |
| 3086 | |
| 3087 | Label end; |
| 3088 | if (need_protection) { |
| 3089 | b(&end); |
| 3090 | } |
| 3091 | |
| 3092 | EmitVeneersGuard(); |
| 3093 | |
| 3094 | Label veneer_size_check; |
| 3095 | |
| 3096 | std::multimap<int, FarBranchInfo>::iterator it, it_to_delete; |
| 3097 | |
| 3098 | it = unresolved_branches_.begin(); |
| 3099 | while (it != unresolved_branches_.end()) { |
| 3100 | if (force_emit || ShouldEmitVeneer(it->first, margin)) { |
| 3101 | Instruction* branch = InstructionAt(it->second.pc_offset_); |
| 3102 | Label* label = it->second.label_; |
| 3103 | |
| 3104 | #ifdef DEBUG |
| 3105 | bind(&veneer_size_check); |
| 3106 | #endif |
| 3107 | // Patch the branch to point to the current position, and emit a branch |
| 3108 | // to the label. |
| 3109 | Instruction* veneer = reinterpret_cast<Instruction*>(pc_); |
| 3110 | RemoveBranchFromLabelLinkChain(branch, label, veneer); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3111 | branch->SetImmPCOffsetTarget(isolate(), veneer); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3112 | b(label); |
| 3113 | #ifdef DEBUG |
| 3114 | DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <= |
| 3115 | static_cast<uint64_t>(kMaxVeneerCodeSize)); |
| 3116 | veneer_size_check.Unuse(); |
| 3117 | #endif |
| 3118 | |
| 3119 | it_to_delete = it++; |
| 3120 | unresolved_branches_.erase(it_to_delete); |
| 3121 | } else { |
| 3122 | ++it; |
| 3123 | } |
| 3124 | } |
| 3125 | |
| 3126 | // Record the veneer pool size. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3127 | int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3128 | RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size); |
| 3129 | |
| 3130 | if (unresolved_branches_.empty()) { |
| 3131 | next_veneer_pool_check_ = kMaxInt; |
| 3132 | } else { |
| 3133 | next_veneer_pool_check_ = |
| 3134 | unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; |
| 3135 | } |
| 3136 | |
| 3137 | bind(&end); |
| 3138 | |
| 3139 | RecordComment("]"); |
| 3140 | } |
| 3141 | |
| 3142 | |
| 3143 | void Assembler::CheckVeneerPool(bool force_emit, bool require_jump, |
| 3144 | int margin) { |
| 3145 | // There is nothing to do if there are no pending veneer pool entries. |
| 3146 | if (unresolved_branches_.empty()) { |
| 3147 | DCHECK(next_veneer_pool_check_ == kMaxInt); |
| 3148 | return; |
| 3149 | } |
| 3150 | |
| 3151 | DCHECK(pc_offset() < unresolved_branches_first_limit()); |
| 3152 | |
| 3153 |   // Some short sequences of instructions must not be broken up by veneer pool |
| 3154 |   // emission; such sequences are protected by calls to BlockVeneerPoolFor and |
| 3155 |   // BlockVeneerPoolScope. |
| 3156 | if (is_veneer_pool_blocked()) { |
| 3157 | DCHECK(!force_emit); |
| 3158 | return; |
| 3159 | } |
| 3160 | |
| 3161 | if (!require_jump) { |
| 3162 | // Prefer emitting veneers protected by an existing instruction. |
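| |     // Inflating the margin makes emission more eager here, where no extra |
| |     // protecting branch needs to be generated around the pool. |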
| 3163 | margin *= kVeneerNoProtectionFactor; |
| 3164 | } |
| 3165 | if (force_emit || ShouldEmitVeneers(margin)) { |
| 3166 | EmitVeneers(force_emit, require_jump, margin); |
| 3167 | } else { |
| 3168 | next_veneer_pool_check_ = |
| 3169 | unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; |
| 3170 | } |
| 3171 | } |
| 3172 | |
| 3173 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3174 | int Assembler::buffer_space() const { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3175 | return static_cast<int>(reloc_info_writer.pos() - |
| 3176 | reinterpret_cast<byte*>(pc_)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3177 | } |
| 3178 | |
| 3179 | |
| 3180 | void Assembler::RecordConstPool(int size) { |
| 3181 | // We only need this for debugger support, to correctly compute offsets in the |
| 3182 | // code. |
| 3183 | RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); |
| 3184 | } |
| 3185 | |
| 3186 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3187 | void PatchingAssembler::PatchAdrFar(int64_t target_offset) { |
| 3188 | // The code at the current instruction should be: |
| 3189 | // adr rd, 0 |
| 3190 | // nop (adr_far) |
| 3191 | // nop (adr_far) |
| 3192 | // movz scratch, 0 |
| 3193 | |
| 3194 | // Verify the expected code. |
| 3195 | Instruction* expected_adr = InstructionAt(0); |
| 3196 | CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0)); |
| 3197 | int rd_code = expected_adr->Rd(); |
| 3198 | for (int i = 0; i < kAdrFarPatchableNNops; ++i) { |
| 3199 | CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP)); |
| 3200 | } |
| 3201 | Instruction* expected_movz = |
| 3202 | InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize); |
| 3203 | CHECK(expected_movz->IsMovz() && |
| 3204 | (expected_movz->ImmMoveWide() == 0) && |
| 3205 | (expected_movz->ShiftMoveWide() == 0)); |
| 3206 | int scratch_code = expected_movz->Rd(); |
| 3207 | |
| 3208 | // Patch to load the correct address. |
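| |   // rd receives pc + (target_offset & 0xFFFF) from the adr, scratch |
| |   // accumulates bits [47:16] of the offset, and the final add combines them. |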
| 3209 | Register rd = Register::XRegFromCode(rd_code); |
| 3210 | Register scratch = Register::XRegFromCode(scratch_code); |
| 3211 | // Addresses are only 48 bits. |
| 3212 | adr(rd, target_offset & 0xFFFF); |
| 3213 | movz(scratch, (target_offset >> 16) & 0xFFFF, 16); |
| 3214 | movk(scratch, (target_offset >> 32) & 0xFFFF, 32); |
| 3215 | DCHECK((target_offset >> 48) == 0); |
| 3216 | add(rd, rd, scratch); |
| 3217 | } |
| 3218 | |
| 3219 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3220 | } // namespace internal |
| 3221 | } // namespace v8 |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3222 | |
| 3223 | #endif // V8_TARGET_ARCH_ARM64 |