Fixes to build against new VIXL interface.
- Fix namespace usage and use of deprecated functions.
- Link all dependents to new libvixl-arm64 target for now.
Change-Id: Iee6f299784fd663fc2a759f3ee816fdbc511e509
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7cdcea2..54b009a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -33,8 +33,7 @@
#include "utils/assembler.h"
#include "utils/stack_checks.h"
-
-using namespace vixl; // NOLINT(build/namespaces)
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
#ifdef __
#error "ARM64 Codegen VIXL macro-assembler macro already defined."
@@ -147,20 +146,20 @@
codegen->GetNumberOfFloatingPointRegisters()));
CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
- register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
+ register_set->GetCoreRegisters() & (~callee_saved_core_registers.GetList()));
CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
- register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
+ register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.GetList()));
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
Register base = masm->StackPointer();
- int64_t core_spill_size = core_list.TotalSizeInBytes();
- int64_t fp_spill_size = fp_list.TotalSizeInBytes();
+ int64_t core_spill_size = core_list.GetTotalSizeInBytes();
+ int64_t fp_spill_size = fp_list.GetTotalSizeInBytes();
int64_t reg_size = kXRegSizeInBytes;
int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
uint32_t ls_access_size = WhichPowerOf2(reg_size);
- if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
+ if (((core_list.GetCount() > 1) || (fp_list.GetCount() > 1)) &&
!masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
// If the offset does not fit in the instruction's immediate field, use an alternate register
// to compute the base address(float point registers spill base address).
@@ -411,7 +410,7 @@
}
}
- vixl::Label* GetReturnLabel() {
+ vixl::aarch64::Label* GetReturnLabel() {
DCHECK(successor_ == nullptr);
return &return_label_;
}
@@ -427,7 +426,7 @@
HBasicBlock* const successor_;
// If `successor_` is null, the label to branch to after the suspend check.
- vixl::Label return_label_;
+ vixl::aarch64::Label return_label_;
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
};
@@ -567,9 +566,9 @@
__ Bind(&table_start_);
const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
for (uint32_t i = 0; i < num_entries; i++) {
- vixl::Label* target_label = codegen->GetLabelOf(successors[i]);
+ vixl::aarch64::Label* target_label = codegen->GetLabelOf(successors[i]);
DCHECK(target_label->IsBound());
- ptrdiff_t jump_offset = target_label->location() - table_start_.location();
+ ptrdiff_t jump_offset = target_label->GetLocation() - table_start_.GetLocation();
DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
Literal<int32_t> literal(jump_offset);
@@ -790,8 +789,8 @@
private:
Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
- size_t ref = static_cast<int>(XRegisterFrom(ref_).code());
- size_t obj = static_cast<int>(XRegisterFrom(obj_).code());
+ size_t ref = static_cast<int>(XRegisterFrom(ref_).GetCode());
+ size_t obj = static_cast<int>(XRegisterFrom(obj_).GetCode());
for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) {
return Register(VIXLRegCodeFromART(i), kXRegSize);
@@ -909,8 +908,8 @@
kNumberOfAllocatableRegisters,
kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs,
- callee_saved_core_registers.list(),
- callee_saved_fp_registers.list(),
+ callee_saved_core_registers.GetList(),
+ callee_saved_fp_registers.GetList(),
compiler_options,
stats),
block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
@@ -1060,17 +1059,17 @@
GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}
-vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
+CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
- return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
- core_spill_mask_);
+ return CPURegList(CPURegister::kRegister, kXRegSize,
+ core_spill_mask_);
}
-vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
+CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
GetNumberOfFloatingPointRegisters()));
- return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
- fpu_spill_mask_);
+ return CPURegList(CPURegister::kFPRegister, kDRegSize,
+ fpu_spill_mask_);
}
void CodeGeneratorARM64::Bind(HBasicBlock* block) {
@@ -1094,7 +1093,7 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
Register card = temps.AcquireX();
Register temp = temps.AcquireW(); // Index within the CardTable - 32bit.
- vixl::Label done;
+ vixl::aarch64::Label done;
if (value_can_be_null) {
__ Cbz(value, &done);
}
@@ -1119,12 +1118,12 @@
CPURegList reserved_core_registers = vixl_reserved_core_registers;
reserved_core_registers.Combine(runtime_reserved_core_registers);
while (!reserved_core_registers.IsEmpty()) {
- blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
+ blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true;
}
CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
while (!reserved_fp_registers.IsEmpty()) {
- blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
+ blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().GetCode()] = true;
}
if (GetGraph()->IsDebuggable()) {
@@ -1133,7 +1132,7 @@
// now, just block them.
CPURegList reserved_fp_registers_debuggable = callee_saved_fp_registers;
while (!reserved_fp_registers_debuggable.IsEmpty()) {
- blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().code()] = true;
+ blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().GetCode()] = true;
}
}
}
@@ -1344,7 +1343,7 @@
DCHECK(!src.IsPostIndex());
// TODO(vixl): Let the MacroAssembler handle MemOperand.
- __ Add(temp_base, src.base(), OperandFromMemOperand(src));
+ __ Add(temp_base, src.GetBaseRegister(), OperandFromMemOperand(src));
MemOperand base = MemOperand(temp_base);
switch (type) {
case Primitive::kPrimBoolean:
@@ -1436,7 +1435,7 @@
// TODO(vixl): Let the MacroAssembler handle this.
Operand op = OperandFromMemOperand(dst);
- __ Add(temp_base, dst.base(), op);
+ __ Add(temp_base, dst.GetBaseRegister(), op);
MemOperand base = MemOperand(temp_base);
switch (type) {
case Primitive::kPrimBoolean:
@@ -1490,7 +1489,7 @@
}
void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
- vixl::Register class_reg) {
+ Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
size_t status_offset = mirror::Class::StatusOffset().SizeValue();
@@ -1755,7 +1754,7 @@
__ Sub(dst, lhs, rhs);
} else if (instr->IsRor()) {
if (rhs.IsImmediate()) {
- uint32_t shift = rhs.immediate() & (lhs.SizeInBits() - 1);
+ uint32_t shift = rhs.GetImmediate() & (lhs.GetSizeInBits() - 1);
__ Ror(dst, lhs, shift);
} else {
// Ensure shift distance is in the same size register as the result. If
@@ -1818,7 +1817,7 @@
Register lhs = InputRegisterAt(instr, 0);
Operand rhs = InputOperandAt(instr, 1);
if (rhs.IsImmediate()) {
- uint32_t shift_value = rhs.immediate() &
+ uint32_t shift_value = rhs.GetImmediate() &
(type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance);
if (instr->IsShl()) {
__ Lsl(dst, lhs, shift_value);
@@ -1828,7 +1827,7 @@
__ Lsr(dst, lhs, shift_value);
}
} else {
- Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
+ Register rhs_reg = dst.IsX() ? rhs.GetRegister().X() : rhs.GetRegister().W();
if (instr->IsShl()) {
__ Lsl(dst, lhs, rhs_reg);
@@ -2014,13 +2013,14 @@
if (instr->GetType() == Primitive::kPrimLong &&
codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) {
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler();
- vixl::Instruction* prev = masm->GetCursorAddress<vixl::Instruction*>() - vixl::kInstructionSize;
+ vixl::aarch64::Instruction* prev =
+ masm->GetCursorAddress<vixl::aarch64::Instruction*>() - kInstructionSize;
if (prev->IsLoadOrStore()) {
// Make sure we emit only exactly one nop.
- vixl::CodeBufferCheckScope scope(masm,
- vixl::kInstructionSize,
- vixl::CodeBufferCheckScope::kCheck,
- vixl::CodeBufferCheckScope::kExactSize);
+ vixl::aarch64::CodeBufferCheckScope scope(masm,
+ kInstructionSize,
+ vixl::aarch64::CodeBufferCheckScope::kCheck,
+ vixl::aarch64::CodeBufferCheckScope::kExactSize);
__ nop();
}
}
@@ -2210,7 +2210,7 @@
} else {
DCHECK(needs_write_barrier);
DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
- vixl::Label done;
+ vixl::aarch64::Label done;
SlowPathCodeARM64* slow_path = nullptr;
{
// We use a block to end the scratch scope before the write barrier, thus
@@ -2235,7 +2235,7 @@
slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
- vixl::Label non_zero;
+ vixl::aarch64::Label non_zero;
__ Cbnz(Register(value), &non_zero);
if (!index.IsConstant()) {
__ Add(temp, array, offset);
@@ -2289,7 +2289,7 @@
__ Cmp(temp, temp2);
if (instruction->StaticTypeOfArrayIsObjectArray()) {
- vixl::Label do_put;
+ vixl::aarch64::Label do_put;
__ B(eq, &do_put);
// If heap poisoning is enabled, the `temp` reference has
// not been unpoisoned yet; unpoison it now.
@@ -2822,11 +2822,11 @@
void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
- vixl::Label* true_target,
- vixl::Label* false_target) {
+ vixl::aarch64::Label* true_target,
+ vixl::aarch64::Label* false_target) {
// FP branching requires both targets to be explicit. If either of the targets
// is nullptr (fallthrough) use and bind `fallthrough_target` instead.
- vixl::Label fallthrough_target;
+ vixl::aarch64::Label fallthrough_target;
HInstruction* cond = instruction->InputAt(condition_input_index);
if (true_target == nullptr && false_target == nullptr) {
@@ -2884,7 +2884,7 @@
Operand rhs = InputOperandAt(condition, 1);
Condition arm64_cond;
- vixl::Label* non_fallthrough_target;
+ vixl::aarch64::Label* non_fallthrough_target;
if (true_target == nullptr) {
arm64_cond = ARM64Condition(condition->GetOppositeCondition());
non_fallthrough_target = false_target;
@@ -2894,7 +2894,7 @@
}
if ((arm64_cond == eq || arm64_cond == ne || arm64_cond == lt || arm64_cond == ge) &&
- rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ rhs.IsImmediate() && (rhs.GetImmediate() == 0)) {
switch (arm64_cond) {
case eq:
__ Cbz(lhs, non_fallthrough_target);
@@ -2943,10 +2943,14 @@
void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
- vixl::Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
- nullptr : codegen_->GetLabelOf(true_successor);
- vixl::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
- nullptr : codegen_->GetLabelOf(false_successor);
+ vixl::aarch64::Label* true_target = codegen_->GetLabelOf(true_successor);
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor)) {
+ true_target = nullptr;
+ }
+ vixl::aarch64::Label* false_target = codegen_->GetLabelOf(false_successor);
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
+ false_target = nullptr;
+ }
GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
}
@@ -3130,7 +3134,7 @@
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- vixl::Label done, zero;
+ vixl::aarch64::Label done, zero;
SlowPathCodeARM64* slow_path = nullptr;
// Return 0 if `obj` is null.
@@ -3155,7 +3159,7 @@
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- vixl::Label loop, success;
+ vixl::aarch64::Label loop, success;
__ Bind(&loop);
// /* HeapReference<Class> */ out = out->super_class_
GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
@@ -3172,7 +3176,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
- vixl::Label loop, success;
+ vixl::aarch64::Label loop, success;
__ Bind(&loop);
__ Cmp(out, cls);
__ B(eq, &success);
@@ -3191,7 +3195,7 @@
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- vixl::Label exact_check;
+ vixl::aarch64::Label exact_check;
__ Cmp(out, cls);
__ B(eq, &exact_check);
// Otherwise, we need to check that the object's class is a non-primitive array.
@@ -3328,7 +3332,7 @@
is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
- vixl::Label done;
+ vixl::aarch64::Label done;
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
__ Cbz(obj, &done);
@@ -3350,7 +3354,7 @@
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- vixl::Label loop, compare_classes;
+ vixl::aarch64::Label loop, compare_classes;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
@@ -3377,7 +3381,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
// Walk over the class hierarchy to find a match.
- vixl::Label loop;
+ vixl::aarch64::Label loop;
__ Bind(&loop);
__ Cmp(temp, cls);
__ B(eq, &done);
@@ -3402,7 +3406,7 @@
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- vixl::Label check_non_primitive_component_type;
+ vixl::aarch64::Label check_non_primitive_component_type;
__ Cmp(temp, cls);
__ B(eq, &done);
@@ -3628,17 +3632,17 @@
// Add ADRP with its PC-relative DexCache access patch.
const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
- vixl::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(XRegisterFrom(temp), /* offset placeholder */ 0);
}
// Add LDR with its PC-relative DexCache access patch.
- vixl::Label* ldr_label =
+ vixl::aarch64::Label* ldr_label =
NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(ldr_label);
__ ldr(XRegisterFrom(temp), MemOperand(XRegisterFrom(temp), /* offset placeholder */ 0));
}
@@ -3675,8 +3679,8 @@
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
relative_call_patches_.emplace_back(invoke->GetTargetMethod());
- vixl::Label* label = &relative_call_patches_.back().label;
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ vixl::aarch64::Label* label = &relative_call_patches_.back().label;
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(label);
__ bl(0); // Branch and link to itself. This will be overridden at link time.
break;
@@ -3735,58 +3739,64 @@
__ Blr(lr);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(const DexFile& dex_file,
- uint32_t string_index,
- vixl::Label* adrp_label) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(
+ const DexFile& dex_file,
+ uint32_t string_index,
+ vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(dex_file, string_index, adrp_label, &pc_relative_string_patches_);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(const DexFile& dex_file,
- uint32_t type_index,
- vixl::Label* adrp_label) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeTypePatch(
+ const DexFile& dex_file,
+ uint32_t type_index,
+ vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(dex_file, type_index, adrp_label, &pc_relative_type_patches_);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
- uint32_t element_offset,
- vixl::Label* adrp_label) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(
+ const DexFile& dex_file,
+ uint32_t element_offset,
+ vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(dex_file, element_offset, adrp_label, &pc_relative_dex_cache_patches_);
}
-vixl::Label* CodeGeneratorARM64::NewPcRelativePatch(const DexFile& dex_file,
- uint32_t offset_or_index,
- vixl::Label* adrp_label,
- ArenaDeque<PcRelativePatchInfo>* patches) {
+vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
+ const DexFile& dex_file,
+ uint32_t offset_or_index,
+ vixl::aarch64::Label* adrp_label,
+ ArenaDeque<PcRelativePatchInfo>* patches) {
// Add a patch entry and return the label.
patches->emplace_back(dex_file, offset_or_index);
PcRelativePatchInfo* info = &patches->back();
- vixl::Label* label = &info->label;
+ vixl::aarch64::Label* label = &info->label;
// If adrp_label is null, this is the ADRP patch and needs to point to its own label.
info->pc_insn_label = (adrp_label != nullptr) ? adrp_label : label;
return label;
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
const DexFile& dex_file, uint32_t string_index) {
return boot_image_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageTypeLiteral(
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageTypeLiteral(
const DexFile& dex_file, uint32_t type_index) {
return boot_image_type_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(uint64_t address) {
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(
+ uint64_t address) {
bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddressLiteral(uint64_t address) {
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddressLiteral(
+ uint64_t address) {
return DeduplicateUint64Literal(address);
}
@@ -3805,76 +3815,76 @@
linker_patches->reserve(size);
for (const auto& entry : method_patches_) {
const MethodReference& target_method = entry.first;
- vixl::Literal<uint64_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::MethodPatch(literal->offset(),
+ vixl::aarch64::Literal<uint64_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::MethodPatch(literal->GetOffset(),
target_method.dex_file,
target_method.dex_method_index));
}
for (const auto& entry : call_patches_) {
const MethodReference& target_method = entry.first;
- vixl::Literal<uint64_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::CodePatch(literal->offset(),
+ vixl::aarch64::Literal<uint64_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::CodePatch(literal->GetOffset(),
target_method.dex_file,
target_method.dex_method_index));
}
- for (const MethodPatchInfo<vixl::Label>& info : relative_call_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.location(),
+ for (const MethodPatchInfo<vixl::aarch64::Label>& info : relative_call_patches_) {
+ linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.GetLocation(),
info.target_method.dex_file,
info.target_method.dex_method_index));
}
for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
- linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location(),
+ linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.GetLocation(),
&info.target_dex_file,
- info.pc_insn_label->location(),
+ info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
for (const auto& entry : boot_image_string_patches_) {
const StringReference& target_string = entry.first;
- vixl::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::StringPatch(literal->offset(),
+ vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::StringPatch(literal->GetOffset(),
target_string.dex_file,
target_string.string_index));
}
for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeStringPatch(info.label.location(),
+ linker_patches->push_back(LinkerPatch::RelativeStringPatch(info.label.GetLocation(),
&info.target_dex_file,
- info.pc_insn_label->location(),
+ info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
for (const auto& entry : boot_image_type_patches_) {
const TypeReference& target_type = entry.first;
- vixl::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::TypePatch(literal->offset(),
+ vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::TypePatch(literal->GetOffset(),
target_type.dex_file,
target_type.type_index));
}
for (const PcRelativePatchInfo& info : pc_relative_type_patches_) {
- linker_patches->push_back(LinkerPatch::RelativeTypePatch(info.label.location(),
+ linker_patches->push_back(LinkerPatch::RelativeTypePatch(info.label.GetLocation(),
&info.target_dex_file,
- info.pc_insn_label->location(),
+ info.pc_insn_label->GetLocation(),
info.offset_or_index));
}
for (const auto& entry : boot_image_address_patches_) {
DCHECK(GetCompilerOptions().GetIncludePatchInformation());
- vixl::Literal<uint32_t>* literal = entry.second;
- linker_patches->push_back(LinkerPatch::RecordPosition(literal->offset()));
+ vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+ linker_patches->push_back(LinkerPatch::RecordPosition(literal->GetOffset()));
}
}
-vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
Uint32ToLiteralMap* map) {
return map->GetOrCreate(
value,
[this, value]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(value); });
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
return uint64_literals_.GetOrCreate(
value,
[this, value]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(value); });
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
MethodReference target_method,
MethodToLiteralMap* map) {
return map->GetOrCreate(
@@ -3882,12 +3892,12 @@
[this]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(/* placeholder */ 0u); });
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
MethodReference target_method) {
return DeduplicateMethodLiteral(target_method, &method_patches_);
}
-vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
+vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
MethodReference target_method) {
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
@@ -3961,7 +3971,7 @@
CodeGenerator::CreateLoadClassLocationSummary(
cls,
LocationFrom(calling_convention.GetRegisterAt(0)),
- LocationFrom(vixl::x0),
+ LocationFrom(vixl::aarch64::x0),
/* code_generator_supports_read_barrier */ true);
return;
}
@@ -4013,16 +4023,17 @@
// Add ADRP with its PC-relative type patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t type_index = cls->GetTypeIndex();
- vixl::Label* adrp_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index);
+ vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add ADD with its PC-relative type patch.
- vixl::Label* add_label = codegen_->NewPcRelativeTypePatch(dex_file, type_index, adrp_label);
+ vixl::aarch64::Label* add_label =
+ codegen_->NewPcRelativeTypePatch(dex_file, type_index, adrp_label);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(add_label);
__ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
}
@@ -4055,14 +4066,15 @@
// Add ADRP with its PC-relative DexCache access patch.
const DexFile& dex_file = cls->GetDexFile();
uint32_t element_offset = cls->GetDexCacheElementOffset();
- vixl::Label* adrp_label = codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ vixl::aarch64::Label* adrp_label =
+ codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add LDR with its PC-relative DexCache access patch.
- vixl::Label* ldr_label =
+ vixl::aarch64::Label* ldr_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
@@ -4182,17 +4194,17 @@
// Add ADRP with its PC-relative String patch.
const DexFile& dex_file = load->GetDexFile();
uint32_t string_index = load->GetStringIndex();
- vixl::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
+ vixl::aarch64::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add ADD with its PC-relative String patch.
- vixl::Label* add_label =
+ vixl::aarch64::Label* add_label =
codegen_->NewPcRelativeStringPatch(dex_file, string_index, adrp_label);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(add_label);
__ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
}
@@ -4224,14 +4236,15 @@
// Add ADRP with its PC-relative DexCache access patch.
const DexFile& dex_file = load->GetDexFile();
uint32_t element_offset = load->GetDexCacheElementOffset();
- vixl::Label* adrp_label = codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
+ vixl::aarch64::Label* adrp_label =
+ codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(adrp_label);
__ adrp(out.X(), /* offset placeholder */ 0);
}
// Add LDR with its PC-relative DexCache access patch.
- vixl::Label* ldr_label =
+ vixl::aarch64::Label* ldr_label =
codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
// /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
@@ -4452,7 +4465,7 @@
}
void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
- __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::aarch64::Operand(1));
}
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
@@ -4885,7 +4898,7 @@
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
// Roughly set 16 as max average assemblies generated per HIR in a graph.
- static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * vixl::kInstructionSize;
+ static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * kInstructionSize;
// ADR has a limited range(+/-1MB), so we set a threshold for the number of HIRs in the graph to
// make sure we don't emit it if the target may run out of range.
// TODO: Instead of emitting all jump tables at the end of the code, we could keep track of ADR
@@ -5030,9 +5043,9 @@
void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(HInstruction* instruction,
Location root,
- vixl::Register obj,
+ Register obj,
uint32_t offset,
- vixl::Label* fixup_label) {
+ vixl::aarch64::Label* fixup_label) {
Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
if (kEmitCompilerReadBarrier) {
if (kUseBakerReadBarrier) {
@@ -5048,7 +5061,7 @@
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
__ ldr(root_reg, MemOperand(obj, offset));
}
@@ -5079,7 +5092,7 @@
if (fixup_label == nullptr) {
__ Add(root_reg.X(), obj.X(), offset);
} else {
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
__ add(root_reg.X(), obj.X(), offset);
}
@@ -5092,7 +5105,7 @@
if (fixup_label == nullptr) {
__ Ldr(root_reg, MemOperand(obj, offset));
} else {
- vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
+ SingleEmissionCheckScope guard(GetVIXLAssembler());
__ Bind(fixup_label);
__ ldr(root_reg, MemOperand(obj, offset));
}
@@ -5103,7 +5116,7 @@
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ Register obj,
uint32_t offset,
Register temp,
bool needs_null_check,
@@ -5127,7 +5140,7 @@
void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ Register obj,
uint32_t data_offset,
Location index,
Register temp,
@@ -5158,7 +5171,7 @@
void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- vixl::Register obj,
+ Register obj,
uint32_t offset,
Location index,
size_t scale_factor,