Merge changes I506bc2a8,I7310de97,Ib3fd1110
* changes:
ART: Do not define abstract HIR kinds.
ART: Remove InstructionTypeEquals().
Store HIR type in HInstruction::packed_field_.
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 065c3de..2c428fa 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -37,6 +37,29 @@
#define ___ asm_.GetVIXLAssembler()->
#endif
+static inline vixl::aarch32::Register AsVIXLRegister(ArmManagedRegister reg) {
+ CHECK(reg.IsCoreRegister());
+ return vixl::aarch32::Register(reg.RegId());
+}
+
+static inline vixl::aarch32::SRegister AsVIXLSRegister(ArmManagedRegister reg) {
+ CHECK(reg.IsSRegister());
+ return vixl::aarch32::SRegister(reg.RegId() - kNumberOfCoreRegIds);
+}
+
+static inline vixl::aarch32::DRegister AsVIXLDRegister(ArmManagedRegister reg) {
+ CHECK(reg.IsDRegister());
+ return vixl::aarch32::DRegister(reg.RegId() - kNumberOfCoreRegIds - kNumberOfSRegIds);
+}
+
+static inline vixl::aarch32::Register AsVIXLRegisterPairLow(ArmManagedRegister reg) {
+ return vixl::aarch32::Register(reg.AsRegisterPairLow());
+}
+
+static inline vixl::aarch32::Register AsVIXLRegisterPairHigh(ArmManagedRegister reg) {
+ return vixl::aarch32::Register(reg.AsRegisterPairHigh());
+}
+
void ArmVIXLJNIMacroAssembler::FinalizeCode() {
for (const std::unique_ptr<
ArmVIXLJNIMacroAssembler::ArmException>& exception : exception_blocks_) {
@@ -60,7 +83,7 @@
ArrayRef<const ManagedRegister> callee_save_regs,
const ManagedRegisterEntrySpills& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
- CHECK(r0.Is(method_reg.AsArm().AsVIXLRegister()));
+ CHECK(r0.Is(AsVIXLRegister(method_reg.AsArm())));
// Push callee saves and link register.
RegList core_spill_mask = 1 << LR;
@@ -104,13 +127,13 @@
ManagedRegisterSpill spill = entry_spills.at(i);
offset += spill.getSize();
} else if (reg.IsCoreRegister()) {
- asm_.StoreToOffset(kStoreWord, reg.AsVIXLRegister(), sp, offset);
+ asm_.StoreToOffset(kStoreWord, AsVIXLRegister(reg), sp, offset);
offset += 4;
} else if (reg.IsSRegister()) {
- asm_.StoreSToOffset(reg.AsVIXLSRegister(), sp, offset);
+ asm_.StoreSToOffset(AsVIXLSRegister(reg), sp, offset);
offset += 4;
} else if (reg.IsDRegister()) {
- asm_.StoreDToOffset(reg.AsVIXLDRegister(), sp, offset);
+ asm_.StoreDToOffset(AsVIXLDRegister(reg), sp, offset);
offset += 8;
}
}
@@ -208,76 +231,71 @@
} else if (src.IsCoreRegister()) {
CHECK_EQ(4u, size);
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(src.AsVIXLRegister());
- asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ temps.Exclude(AsVIXLRegister(src));
+ asm_.StoreToOffset(kStoreWord, AsVIXLRegister(src), sp, dest.Int32Value());
} else if (src.IsRegisterPair()) {
CHECK_EQ(8u, size);
- asm_.StoreToOffset(kStoreWord, src.AsVIXLRegisterPairLow(), sp, dest.Int32Value());
- asm_.StoreToOffset(kStoreWord, src.AsVIXLRegisterPairHigh(), sp, dest.Int32Value() + 4);
+ asm_.StoreToOffset(kStoreWord, AsVIXLRegisterPairLow(src), sp, dest.Int32Value());
+ asm_.StoreToOffset(kStoreWord, AsVIXLRegisterPairHigh(src), sp, dest.Int32Value() + 4);
} else if (src.IsSRegister()) {
CHECK_EQ(4u, size);
- asm_.StoreSToOffset(src.AsVIXLSRegister(), sp, dest.Int32Value());
+ asm_.StoreSToOffset(AsVIXLSRegister(src), sp, dest.Int32Value());
} else {
CHECK_EQ(8u, size);
CHECK(src.IsDRegister()) << src;
- asm_.StoreDToOffset(src.AsVIXLDRegister(), sp, dest.Int32Value());
+ asm_.StoreDToOffset(AsVIXLDRegister(src), sp, dest.Int32Value());
}
}
void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
+ vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(src.AsVIXLRegister());
- asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ temps.Exclude(src);
+ asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
}
void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
+ vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(src.AsVIXLRegister());
- asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ temps.Exclude(src);
+ asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
}
void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
FrameOffset in_off,
ManagedRegister mscratch) {
- ArmManagedRegister src = msrc.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- asm_.StoreToOffset(kStoreWord, src.AsVIXLRegister(), sp, dest.Int32Value());
+ vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
+ asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
- asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, in_off.Int32Value());
- asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value() + 4);
+ temps.Exclude(scratch);
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, in_off.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value() + 4);
}
void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest,
FrameOffset src,
ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
- asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, src.Int32Value());
- asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, dest.Int32Value());
+ temps.Exclude(scratch);
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
}
-void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest,
- ManagedRegister base,
+void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister mbase,
MemberOffset offs,
bool unpoison_reference) {
- ArmManagedRegister dst = dest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
+ vixl::aarch32::Register dest = AsVIXLRegister(mdest.AsArm());
+ vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(dst.AsVIXLRegister(), base.AsArm().AsVIXLRegister());
- asm_.LoadFromOffset(kLoadWord,
- dst.AsVIXLRegister(),
- base.AsArm().AsVIXLRegister(),
- offs.Int32Value());
+ temps.Exclude(dest, base);
+ asm_.LoadFromOffset(kLoadWord, dest, base, offs.Int32Value());
if (unpoison_reference) {
- asm_.MaybeUnpoisonHeapReference(dst.AsVIXLRegister());
+ asm_.MaybeUnpoisonHeapReference(dest);
}
}
@@ -294,13 +312,12 @@
void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
uint32_t imm,
- ManagedRegister scratch) {
- ArmManagedRegister mscratch = scratch.AsArm();
- CHECK(mscratch.IsCoreRegister()) << mscratch;
+ ManagedRegister mscratch) {
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(mscratch.AsVIXLRegister());
- asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm);
- asm_.StoreToOffset(kStoreWord, mscratch.AsVIXLRegister(), sp, dest.Int32Value());
+ temps.Exclude(scratch);
+ asm_.LoadImmediate(scratch, imm);
+ asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
}
void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
@@ -313,23 +330,21 @@
return Load(m_dst.AsArm(), tr, src.Int32Value(), size);
}
-void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
- ArmManagedRegister dst = m_dst.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
+void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
+ vixl::aarch32::Register dest = AsVIXLRegister(mdest.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(dst.AsVIXLRegister());
- asm_.LoadFromOffset(kLoadWord, dst.AsVIXLRegister(), tr, offs.Int32Value());
+ temps.Exclude(dest);
+ asm_.LoadFromOffset(kLoadWord, dest, tr, offs.Int32Value());
}
void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
ThreadOffset32 thr_offs,
ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
- asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
- asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
+ temps.Exclude(scratch);
+ asm_.LoadFromOffset(kLoadWord, scratch, tr, thr_offs.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, sp, fr_offs.Int32Value());
}
void ArmVIXLJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
@@ -341,12 +356,11 @@
void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
FrameOffset fr_offs,
ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
- asm_.AddConstant(scratch.AsVIXLRegister(), sp, fr_offs.Int32Value());
- asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), tr, thr_offs.Int32Value());
+ temps.Exclude(scratch);
+ asm_.AddConstant(scratch, sp, fr_offs.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, tr, thr_offs.Int32Value());
}
void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
@@ -363,43 +377,43 @@
UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}
-void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst,
- ManagedRegister m_src,
+void ArmVIXLJNIMacroAssembler::Move(ManagedRegister mdst,
+ ManagedRegister msrc,
size_t size ATTRIBUTE_UNUSED) {
- ArmManagedRegister dst = m_dst.AsArm();
- ArmManagedRegister src = m_src.AsArm();
+ ArmManagedRegister dst = mdst.AsArm();
+ ArmManagedRegister src = msrc.AsArm();
if (!dst.Equals(src)) {
if (dst.IsCoreRegister()) {
CHECK(src.IsCoreRegister()) << src;
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(dst.AsVIXLRegister());
- ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
+ temps.Exclude(AsVIXLRegister(dst));
+ ___ Mov(AsVIXLRegister(dst), AsVIXLRegister(src));
} else if (dst.IsDRegister()) {
if (src.IsDRegister()) {
- ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+ ___ Vmov(F64, AsVIXLDRegister(dst), AsVIXLDRegister(src));
} else {
// VMOV Dn, Rlo, Rhi (Dn = {Rlo, Rhi})
CHECK(src.IsRegisterPair()) << src;
- ___ Vmov(dst.AsVIXLDRegister(), src.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairHigh());
+ ___ Vmov(AsVIXLDRegister(dst), AsVIXLRegisterPairLow(src), AsVIXLRegisterPairHigh(src));
}
} else if (dst.IsSRegister()) {
if (src.IsSRegister()) {
- ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+ ___ Vmov(F32, AsVIXLSRegister(dst), AsVIXLSRegister(src));
} else {
// VMOV Sn, Rn (Sn = Rn)
CHECK(src.IsCoreRegister()) << src;
- ___ Vmov(dst.AsVIXLSRegister(), src.AsVIXLRegister());
+ ___ Vmov(AsVIXLSRegister(dst), AsVIXLRegister(src));
}
} else {
CHECK(dst.IsRegisterPair()) << dst;
CHECK(src.IsRegisterPair()) << src;
// Ensure that the first move doesn't clobber the input of the second.
if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
- ___ Mov(dst.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairLow());
- ___ Mov(dst.AsVIXLRegisterPairHigh(), src.AsVIXLRegisterPairHigh());
+ ___ Mov(AsVIXLRegisterPairLow(dst), AsVIXLRegisterPairLow(src));
+ ___ Mov(AsVIXLRegisterPairHigh(dst), AsVIXLRegisterPairHigh(src));
} else {
- ___ Mov(dst.AsVIXLRegisterPairHigh(), src.AsVIXLRegisterPairHigh());
- ___ Mov(dst.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairLow());
+ ___ Mov(AsVIXLRegisterPairHigh(dst), AsVIXLRegisterPairHigh(src));
+ ___ Mov(AsVIXLRegisterPairLow(dst), AsVIXLRegisterPairLow(src));
}
}
}
@@ -407,21 +421,20 @@
void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest,
FrameOffset src,
- ManagedRegister scratch,
+ ManagedRegister mscratch,
size_t size) {
- ArmManagedRegister temp = scratch.AsArm();
- CHECK(temp.IsCoreRegister()) << temp;
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
CHECK(size == 4 || size == 8) << size;
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(temp.AsVIXLRegister());
+ temps.Exclude(scratch);
if (size == 4) {
- asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
- asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
} else if (size == 8) {
- asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value());
- asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value());
- asm_.LoadFromOffset(kLoadWord, temp.AsVIXLRegister(), sp, src.Int32Value() + 4);
- asm_.StoreToOffset(kStoreWord, temp.AsVIXLRegister(), sp, dest.Int32Value() + 4);
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value() + 4);
+ asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value() + 4);
}
}
@@ -471,48 +484,44 @@
FrameOffset handle_scope_offset,
ManagedRegister min_reg,
bool null_allowed) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
- CHECK(out_reg.IsCoreRegister()) << out_reg;
+ vixl::aarch32::Register out_reg = AsVIXLRegister(mout_reg.AsArm());
+ vixl::aarch32::Register in_reg =
+ min_reg.AsArm().IsNoRegister() ? vixl::aarch32::Register() : AsVIXLRegister(min_reg.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(out_reg.AsVIXLRegister());
+ temps.Exclude(out_reg);
if (null_allowed) {
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- asm_.LoadFromOffset(kLoadWord,
- out_reg.AsVIXLRegister(),
- sp,
- handle_scope_offset.Int32Value());
+ if (!in_reg.IsValid()) {
+ asm_.LoadFromOffset(kLoadWord, out_reg, sp, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
- temps.Exclude(in_reg.AsVIXLRegister());
- ___ Cmp(in_reg.AsVIXLRegister(), 0);
+ temps.Exclude(in_reg);
+ ___ Cmp(in_reg, 0);
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
- if (!out_reg.Equals(in_reg)) {
+ if (!out_reg.Is(in_reg)) {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
3 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(eq, 0xc);
- ___ mov(eq, out_reg.AsVIXLRegister(), 0);
- asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ ___ mov(eq, out_reg, 0);
+ asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
} else {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
- asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ asm_.AddConstantInIt(out_reg, sp, handle_scope_offset.Int32Value(), ne);
}
} else {
// TODO: Implement this (old arm assembler would have crashed here).
UNIMPLEMENTED(FATAL);
}
} else {
- asm_.AddConstant(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ asm_.AddConstant(out_reg, sp, handle_scope_offset.Int32Value());
}
}
@@ -520,31 +529,30 @@
FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
+ temps.Exclude(scratch);
if (null_allowed) {
- asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, handle_scope_offset.Int32Value());
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
// e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- ___ Cmp(scratch.AsVIXLRegister(), 0);
+ ___ Cmp(scratch, 0);
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
- asm_.AddConstantInIt(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
+ asm_.AddConstantInIt(scratch, sp, handle_scope_offset.Int32Value(), ne);
} else {
// TODO: Implement this (old arm assembler would have crashed here).
UNIMPLEMENTED(FATAL);
}
} else {
- asm_.AddConstant(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value());
+ asm_.AddConstant(scratch, sp, handle_scope_offset.Int32Value());
}
- asm_.StoreToOffset(kStoreWord, scratch.AsVIXLRegister(), sp, out_off.Int32Value());
+ asm_.StoreToOffset(kStoreWord, scratch, sp, out_off.Int32Value());
}
void ArmVIXLJNIMacroAssembler::LoadReferenceFromHandleScope(
@@ -566,32 +574,23 @@
void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase,
Offset offset,
ManagedRegister mscratch) {
- ArmManagedRegister base = mbase.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(base.IsCoreRegister()) << base;
- CHECK(scratch.IsCoreRegister()) << scratch;
+ vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
- asm_.LoadFromOffset(kLoadWord,
- scratch.AsVIXLRegister(),
- base.AsVIXLRegister(),
- offset.Int32Value());
- ___ Blx(scratch.AsVIXLRegister());
+ temps.Exclude(scratch);
+ asm_.LoadFromOffset(kLoadWord, scratch, base, offset.Int32Value());
+ ___ Blx(scratch);
// TODO: place reference map on call.
}
void ArmVIXLJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
+ temps.Exclude(scratch);
// Call *(*(SP + base) + offset)
- asm_.LoadFromOffset(kLoadWord, scratch.AsVIXLRegister(), sp, base.Int32Value());
- asm_.LoadFromOffset(kLoadWord,
- scratch.AsVIXLRegister(),
- scratch.AsVIXLRegister(),
- offset.Int32Value());
- ___ Blx(scratch.AsVIXLRegister());
+ asm_.LoadFromOffset(kLoadWord, scratch, sp, base.Int32Value());
+ asm_.LoadFromOffset(kLoadWord, scratch, scratch, offset.Int32Value());
+ ___ Blx(scratch);
// TODO: place reference map on call
}
@@ -602,8 +601,8 @@
void ArmVIXLJNIMacroAssembler::GetCurrentThread(ManagedRegister mtr) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(mtr.AsArm().AsVIXLRegister());
- ___ Mov(mtr.AsArm().AsVIXLRegister(), tr);
+ temps.Exclude(AsVIXLRegister(mtr.AsArm()));
+ ___ Mov(AsVIXLRegister(mtr.AsArm()), tr);
}
void ArmVIXLJNIMacroAssembler::GetCurrentThread(FrameOffset dest_offset,
@@ -611,19 +610,19 @@
asm_.StoreToOffset(kStoreWord, tr, sp, dest_offset.Int32Value());
}
-void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
CHECK_ALIGNED(stack_adjust, kStackAlignment);
- ArmManagedRegister scratch = m_scratch.AsArm();
+ vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(scratch.AsVIXLRegister());
+ temps.Exclude(scratch);
exception_blocks_.emplace_back(
- new ArmVIXLJNIMacroAssembler::ArmException(scratch, stack_adjust));
+ new ArmVIXLJNIMacroAssembler::ArmException(mscratch.AsArm(), stack_adjust));
asm_.LoadFromOffset(kLoadWord,
- scratch.AsVIXLRegister(),
+ scratch,
tr,
Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
- ___ Cmp(scratch.AsVIXLRegister(), 0);
+ ___ Cmp(scratch, 0);
vixl32::Label* label = exception_blocks_.back()->Entry();
___ BPreferNear(ne, label);
// TODO: think about using CBNZ here.
@@ -640,19 +639,18 @@
void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label,
JNIMacroUnaryCondition condition,
- ManagedRegister test) {
+ ManagedRegister mtest) {
CHECK(label != nullptr);
+ vixl::aarch32::Register test = AsVIXLRegister(mtest.AsArm());
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(test.AsArm().AsVIXLRegister());
+ temps.Exclude(test);
switch (condition) {
case JNIMacroUnaryCondition::kZero:
- ___ CompareAndBranchIfZero(test.AsArm().AsVIXLRegister(),
- ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+ ___ CompareAndBranchIfZero(test, ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
break;
case JNIMacroUnaryCondition::kNotZero:
- ___ CompareAndBranchIfNonZero(test.AsArm().AsVIXLRegister(),
- ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+ ___ CompareAndBranchIfNonZero(test, ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
break;
default:
LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
@@ -672,12 +670,13 @@
DecreaseFrameSize(exception->stack_adjust_);
}
+ vixl::aarch32::Register scratch = AsVIXLRegister(exception->scratch_);
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(exception->scratch_.AsVIXLRegister());
+ temps.Exclude(scratch);
// Pass exception object as argument.
// Don't care about preserving r0 as this won't return.
- ___ Mov(r0, exception->scratch_.AsVIXLRegister());
- temps.Include(exception->scratch_.AsVIXLRegister());
+ ___ Mov(r0, scratch);
+ temps.Include(scratch);
// TODO: check that exception->scratch_ is dead by this point.
vixl32::Register temp = temps.Acquire();
___ Ldr(temp,
@@ -698,26 +697,27 @@
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size) << dest;
} else if (dest.IsCoreRegister()) {
- CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
+ vixl::aarch32::Register dst = AsVIXLRegister(dest);
+ CHECK(!dst.Is(sp)) << dest;
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(dest.AsVIXLRegister());
+ temps.Exclude(dst);
if (size == 1u) {
- ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset));
+ ___ Ldrb(dst, MemOperand(base, offset));
} else {
CHECK_EQ(4u, size) << dest;
- ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+ ___ Ldr(dst, MemOperand(base, offset));
}
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size) << dest;
- ___ Ldr(dest.AsVIXLRegisterPairLow(), MemOperand(base, offset));
- ___ Ldr(dest.AsVIXLRegisterPairHigh(), MemOperand(base, offset + 4));
+ ___ Ldr(AsVIXLRegisterPairLow(dest), MemOperand(base, offset));
+ ___ Ldr(AsVIXLRegisterPairHigh(dest), MemOperand(base, offset + 4));
} else if (dest.IsSRegister()) {
- ___ Vldr(dest.AsVIXLSRegister(), MemOperand(base, offset));
+ ___ Vldr(AsVIXLSRegister(dest), MemOperand(base, offset));
} else {
CHECK(dest.IsDRegister()) << dest;
- ___ Vldr(dest.AsVIXLDRegister(), MemOperand(base, offset));
+ ___ Vldr(AsVIXLDRegister(dest), MemOperand(base, offset));
}
}
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
index 26f23b2..e42572d 100644
--- a/compiler/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -20,15 +20,8 @@
#include <android-base/logging.h>
#include "constants_arm.h"
-#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
-// TODO(VIXL): Make VIXL compile with -Wshadow.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#include "aarch32/macro-assembler-aarch32.h"
-#pragma GCC diagnostic pop
-
namespace art {
namespace arm {
@@ -97,31 +90,16 @@
return static_cast<Register>(id_);
}
- vixl::aarch32::Register AsVIXLRegister() const {
- CHECK(IsCoreRegister());
- return vixl::aarch32::Register(id_);
- }
-
constexpr SRegister AsSRegister() const {
CHECK(IsSRegister());
return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
}
- vixl::aarch32::SRegister AsVIXLSRegister() const {
- CHECK(IsSRegister());
- return vixl::aarch32::SRegister(id_ - kNumberOfCoreRegIds);
- }
-
constexpr DRegister AsDRegister() const {
CHECK(IsDRegister());
return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
}
- vixl::aarch32::DRegister AsVIXLDRegister() const {
- CHECK(IsDRegister());
- return vixl::aarch32::DRegister(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
- }
-
constexpr SRegister AsOverlappingDRegisterLow() const {
CHECK(IsOverlappingDRegister());
DRegister d_reg = AsDRegister();
@@ -150,20 +128,12 @@
return FromRegId(AllocIdLow()).AsCoreRegister();
}
- vixl::aarch32::Register AsVIXLRegisterPairLow() const {
- return vixl::aarch32::Register(AsRegisterPairLow());
- }
-
constexpr Register AsRegisterPairHigh() const {
CHECK(IsRegisterPair());
// Appropriate mapping of register ids allows to use AllocIdHigh().
return FromRegId(AllocIdHigh()).AsCoreRegister();
}
- vixl::aarch32::Register AsVIXLRegisterPairHigh() const {
- return vixl::aarch32::Register(AsRegisterPairHigh());
- }
-
constexpr bool IsCoreRegister() const {
CHECK(IsValidManagedRegister());
return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
@@ -255,16 +225,16 @@
return FromDRegister(static_cast<DRegister>(r));
}
- private:
- constexpr bool IsValidManagedRegister() const {
- return (0 <= id_) && (id_ < kNumberOfRegIds);
- }
-
int RegId() const {
CHECK(!IsNoRegister());
return id_;
}
+ private:
+ constexpr bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
int AllocId() const {
CHECK(IsValidManagedRegister() &&
!IsOverlappingDRegister() && !IsRegisterPair());
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index 9ce7ec9..0513890 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -20,7 +20,6 @@
#include <android-base/logging.h>
#include "arch/arm64/registers_arm64.h"
-#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
namespace art {
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index 9a69ffd..0f85892 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -45,6 +45,16 @@
uint32_t,
mips::VectorRegister> Base;
+ // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
+ // and reimplement it without the verification against `assembly_string`. b/73903608
+ void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
+ const std::string& test_name ATTRIBUTE_UNUSED) {
+ GetAssembler()->FinalizeCode();
+ std::vector<uint8_t> data(GetAssembler()->CodeSize());
+ MemoryRegion code(data.data(), data.size());
+ GetAssembler()->FinalizeInstructions(code);
+ }
+
AssemblerMIPS32r5Test() :
instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r5", nullptr)) {
}
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 691c33f..3d876ca 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -45,6 +45,16 @@
uint32_t,
mips::VectorRegister> Base;
+ // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
+ // and reimplement it without the verification against `assembly_string`. b/73903608
+ void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
+ const std::string& test_name ATTRIBUTE_UNUSED) {
+ GetAssembler()->FinalizeCode();
+ std::vector<uint8_t> data(GetAssembler()->CodeSize());
+ MemoryRegion code(data.data(), data.size());
+ GetAssembler()->FinalizeInstructions(code);
+ }
+
AssemblerMIPS32r6Test() :
instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r6", nullptr)) {
}
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index b027d3a..f94d074 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -43,6 +43,16 @@
mips::FRegister,
uint32_t> Base;
+ // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
+ // and reimplement it without the verification against `assembly_string`. b/73903608
+ void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
+ const std::string& test_name ATTRIBUTE_UNUSED) {
+ GetAssembler()->FinalizeCode();
+ std::vector<uint8_t> data(GetAssembler()->CodeSize());
+ MemoryRegion code(data.data(), data.size());
+ GetAssembler()->FinalizeInstructions(code);
+ }
+
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
std::string GetArchitectureString() OVERRIDE {
diff --git a/compiler/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h
index 66204e7..18d5821 100644
--- a/compiler/utils/mips/managed_register_mips.h
+++ b/compiler/utils/mips/managed_register_mips.h
@@ -18,7 +18,6 @@
#define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
#include "constants_mips.h"
-#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
namespace art {
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index fb5f12b..a53ff7c 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -48,6 +48,16 @@
uint32_t,
mips64::VectorRegister> Base;
+ // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
+ // and reimplement it without the verification against `assembly_string`. b/73903608
+ void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
+ const std::string& test_name ATTRIBUTE_UNUSED) {
+ GetAssembler()->FinalizeCode();
+ std::vector<uint8_t> data(GetAssembler()->CodeSize());
+ MemoryRegion code(data.data(), data.size());
+ GetAssembler()->FinalizeInstructions(code);
+ }
+
AssemblerMIPS64Test()
: instruction_set_features_(Mips64InstructionSetFeatures::FromVariant("default", nullptr)) {}
diff --git a/compiler/utils/mips64/managed_register_mips64.h b/compiler/utils/mips64/managed_register_mips64.h
index 3980199..94166d3 100644
--- a/compiler/utils/mips64/managed_register_mips64.h
+++ b/compiler/utils/mips64/managed_register_mips64.h
@@ -18,7 +18,6 @@
#define ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
#include "constants_mips64.h"
-#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
namespace art {
diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
index c0c2b65..8810bfa 100644
--- a/compiler/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -18,7 +18,6 @@
#define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
#include "constants_x86.h"
-#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
namespace art {
diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h
index 32af672..6760882 100644
--- a/compiler/utils/x86_64/managed_register_x86_64.h
+++ b/compiler/utils/x86_64/managed_register_x86_64.h
@@ -18,7 +18,6 @@
#define ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_
#include "constants_x86_64.h"
-#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
namespace art {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 811d6db..a129171 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -299,10 +299,17 @@
public:
Region()
: idx_(static_cast<size_t>(-1)),
- begin_(nullptr), top_(nullptr), end_(nullptr),
- state_(RegionState::kRegionStateAllocated), type_(RegionType::kRegionTypeToSpace),
- objects_allocated_(0), alloc_time_(0), live_bytes_(static_cast<size_t>(-1)),
- is_newly_allocated_(false), is_a_tlab_(false), thread_(nullptr) {}
+ live_bytes_(static_cast<size_t>(-1)),
+ begin_(nullptr),
+ thread_(nullptr),
+ top_(nullptr),
+ end_(nullptr),
+ objects_allocated_(0),
+ alloc_time_(0),
+ is_newly_allocated_(false),
+ is_a_tlab_(false),
+ state_(RegionState::kRegionStateAllocated),
+ type_(RegionType::kRegionTypeToSpace) {}
void Init(size_t idx, uint8_t* begin, uint8_t* end) {
idx_ = idx;
@@ -496,22 +503,22 @@
private:
size_t idx_; // The region's index in the region space.
+ size_t live_bytes_; // The live bytes. Used to compute the live percent.
uint8_t* begin_; // The begin address of the region.
+ Thread* thread_; // The owning thread if it's a tlab.
// Note that `top_` can be higher than `end_` in the case of a
// large region, where an allocated object spans multiple regions
// (large region + one or more large tail regions).
Atomic<uint8_t*> top_; // The current position of the allocation.
uint8_t* end_; // The end address of the region.
- RegionState state_; // The region state (see RegionState).
- RegionType type_; // The region type (see RegionType).
Atomic<size_t> objects_allocated_; // The number of objects allocated.
uint32_t alloc_time_; // The allocation time of the region.
// Note that newly allocated and evacuated regions use -1 as
// special value for `live_bytes_`.
- size_t live_bytes_; // The live bytes. Used to compute the live percent.
bool is_newly_allocated_; // True if it's allocated after the last collection.
bool is_a_tlab_; // True if it's a tlab.
- Thread* thread_; // The owning thread if it's a tlab.
+ RegionState state_; // The region state (see RegionState).
+ RegionType type_; // The region type (see RegionType).
friend class RegionSpace;
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 5518eb2..3aa481a 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -252,12 +252,6 @@
.Define("-Xstackdumplockprofthreshold:_")
.WithType<unsigned int>()
.IntoKey(M::StackDumpLockProfThreshold)
- .Define("-Xusetombstonedtraces")
- .WithValue(true)
- .IntoKey(M::UseTombstonedTraces)
- .Define("-Xstacktracefile:_")
- .WithType<std::string>()
- .IntoKey(M::StackTraceFile)
.Define("-Xmethod-trace")
.IntoKey(M::MethodTrace)
.Define("-Xmethod-trace-file:_")
@@ -699,7 +693,6 @@
UsageMessage(stream, "The following Dalvik options are supported:\n");
UsageMessage(stream, " -Xzygote\n");
UsageMessage(stream, " -Xjnitrace:substring (eg NativeClass or nativeMethod)\n");
- UsageMessage(stream, " -Xstacktracefile:<filename>\n");
UsageMessage(stream, " -Xgc:[no]preverify\n");
UsageMessage(stream, " -Xgc:[no]postverify\n");
UsageMessage(stream, " -XX:HeapGrowthLimit=N\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d12a976..7823014 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -232,7 +232,6 @@
intern_table_(nullptr),
class_linker_(nullptr),
signal_catcher_(nullptr),
- use_tombstoned_traces_(false),
java_vm_(nullptr),
fault_message_lock_("Fault message lock"),
fault_message_(""),
@@ -904,7 +903,7 @@
void Runtime::StartSignalCatcher() {
if (!is_zygote_) {
- signal_catcher_ = new SignalCatcher(stack_trace_file_, use_tombstoned_traces_);
+ signal_catcher_ = new SignalCatcher();
}
}
@@ -1152,12 +1151,6 @@
abort_ = runtime_options.GetOrDefault(Opt::HookAbort);
default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
- use_tombstoned_traces_ = runtime_options.GetOrDefault(Opt::UseTombstonedTraces);
-#if !defined(ART_TARGET_ANDROID)
- CHECK(!use_tombstoned_traces_)
- << "-Xusetombstonedtraces is only supported in an Android environment";
-#endif
- stack_trace_file_ = runtime_options.ReleaseOrDefault(Opt::StackTraceFile);
compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 1b7663c..56d95e0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -871,14 +871,6 @@
SignalCatcher* signal_catcher_;
- // If true, the runtime will connect to tombstoned via a socket to
- // request an open file descriptor to write its traces to.
- bool use_tombstoned_traces_;
-
- // Location to which traces must be written on SIGQUIT. Only used if
- // tombstoned_traces_ == false.
- std::string stack_trace_file_;
-
std::unique_ptr<JavaVMExt> java_vm_;
std::unique_ptr<jit::Jit> jit_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 4121ad6..427385d 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -103,8 +103,6 @@
RUNTIME_OPTIONS_KEY (LogVerbosity, Verbose)
RUNTIME_OPTIONS_KEY (unsigned int, LockProfThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, StackDumpLockProfThreshold)
-RUNTIME_OPTIONS_KEY (bool, UseTombstonedTraces, false)
-RUNTIME_OPTIONS_KEY (std::string, StackTraceFile)
RUNTIME_OPTIONS_KEY (Unit, MethodTrace)
RUNTIME_OPTIONS_KEY (std::string, MethodTraceFile, "/data/misc/trace/method-trace-file.bin")
RUNTIME_OPTIONS_KEY (unsigned int, MethodTraceFileSize, 10 * MB)
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index d590ad5..f4a27b8 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -73,19 +73,10 @@
#endif
}
-SignalCatcher::SignalCatcher(const std::string& stack_trace_file,
- bool use_tombstoned_stack_trace_fd)
- : stack_trace_file_(stack_trace_file),
- use_tombstoned_stack_trace_fd_(use_tombstoned_stack_trace_fd),
- lock_("SignalCatcher lock"),
+SignalCatcher::SignalCatcher()
+ : lock_("SignalCatcher lock"),
cond_("SignalCatcher::cond_", lock_),
thread_(nullptr) {
-#if !defined(ART_TARGET_ANDROID)
- // We're not running on Android, so we can't communicate with tombstoned
- // to ask for an open file.
- CHECK(!use_tombstoned_stack_trace_fd_);
-#endif
-
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
@@ -116,37 +107,11 @@
return halt_;
}
-bool SignalCatcher::OpenStackTraceFile(android::base::unique_fd* tombstone_fd,
- android::base::unique_fd* output_fd) {
- if (use_tombstoned_stack_trace_fd_) {
-#if defined(ART_TARGET_ANDROID)
- return tombstoned_connect(getpid(), tombstone_fd, output_fd, kDebuggerdJavaBacktrace);
-#else
- UNUSED(tombstone_fd);
- UNUSED(output_fd);
-#endif
- }
-
- // The runtime is not configured to dump traces to a file, will LOG(INFO)
- // instead.
- if (stack_trace_file_.empty()) {
- return false;
- }
-
- int fd = open(stack_trace_file_.c_str(), O_APPEND | O_CREAT | O_WRONLY, 0666);
- if (fd == -1) {
- PLOG(ERROR) << "Unable to open stack trace file '" << stack_trace_file_ << "'";
- return false;
- }
-
- output_fd->reset(fd);
- return true;
-}
-
void SignalCatcher::Output(const std::string& s) {
+#if defined(ART_TARGET_ANDROID)
android::base::unique_fd tombstone_fd;
android::base::unique_fd output_fd;
- if (!OpenStackTraceFile(&tombstone_fd, &output_fd)) {
+ if (!tombstoned_connect(getpid(), &tombstone_fd, &output_fd, kDebuggerdJavaBacktrace)) {
LOG(INFO) << s;
return;
}
@@ -161,19 +126,16 @@
file->Erase();
}
- const std::string output_path_msg = (use_tombstoned_stack_trace_fd_) ?
- "[tombstoned]" : stack_trace_file_;
-
if (success) {
- LOG(INFO) << "Wrote stack traces to '" << output_path_msg << "'";
+ LOG(INFO) << "Wrote stack traces to tombstoned";
} else {
- PLOG(ERROR) << "Failed to write stack traces to '" << output_path_msg << "'";
+ PLOG(ERROR) << "Failed to write stack traces to tombstoned";
}
-
-#if defined(ART_TARGET_ANDROID)
- if (use_tombstoned_stack_trace_fd_ && !tombstoned_notify_completion(tombstone_fd)) {
+ if (!tombstoned_notify_completion(tombstone_fd)) {
PLOG(WARNING) << "Unable to notify tombstoned of dump completion";
}
+#else
+ LOG(INFO) << s;
#endif
}
diff --git a/runtime/signal_catcher.h b/runtime/signal_catcher.h
index 8a2a728..46eae7e 100644
--- a/runtime/signal_catcher.h
+++ b/runtime/signal_catcher.h
@@ -33,17 +33,7 @@
*/
class SignalCatcher {
public:
- // If |use_tombstoned_stack_trace_fd| is |true|, traces will be
- // written to a file descriptor provided by tombstoned. The process
- // will communicate with tombstoned via a unix domain socket. This
- // mode of stack trace dumping is only supported in an Android
- // environment.
- //
- // If false, all traces will be dumped to |stack_trace_file| if it's
- // non-empty. If |stack_trace_file| is empty, all traces will be written
- // to the log buffer.
- SignalCatcher(const std::string& stack_trace_file,
- const bool use_tombstoned_stack_trace_fd);
+ SignalCatcher();
~SignalCatcher();
void HandleSigQuit() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
@@ -54,19 +44,12 @@
// NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
static void* Run(void* arg) NO_THREAD_SAFETY_ANALYSIS;
- // NOTE: We're using android::base::unique_fd here for easier
- // interoperability with tombstoned client APIs.
- bool OpenStackTraceFile(android::base::unique_fd* tombstone_fd,
- android::base::unique_fd* output_fd);
void HandleSigUsr1();
void Output(const std::string& s);
void SetHaltFlag(bool new_value) REQUIRES(!lock_);
bool ShouldHalt() REQUIRES(!lock_);
int WaitForSignal(Thread* self, SignalSet& signals) REQUIRES(!lock_);
- std::string stack_trace_file_;
- const bool use_tombstoned_stack_trace_fd_;
-
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ConditionVariable cond_ GUARDED_BY(lock_);
bool halt_ GUARDED_BY(lock_);
diff --git a/test/913-heaps/expected_d8.diff b/test/913-heaps/expected_d8.diff
index 3ea3c0d..1ad0cbd 100644
--- a/test/913-heaps/expected_d8.diff
+++ b/test/913-heaps/expected_d8.diff
@@ -10,8 +10,8 @@
51c50,51
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
102,103c102
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
@@ -24,8 +24,8 @@
117c116,117
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
162c162
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
---
@@ -37,8 +37,8 @@
179c179,180
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
201,202c202
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
@@ -51,8 +51,8 @@
248c248,249
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
292d292
< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
347c347
@@ -66,5 +66,5 @@
368c368,369
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index be1296b..81e77be 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -80,11 +80,6 @@
# The *hard* timeout where we really start trying to kill the dex2oat.
DEX2OAT_RT_TIMEOUT="360" # 6 mins
-# if "y", set -Xstacktracedir and inform the test of its location. When
-# this is set, stack trace dumps (from signal 3) will be written to a file
-# under this directory instead of stdout.
-SET_STACK_TRACE_DUMP_DIR="n"
-
# if "y", run 'sync' before dalvikvm to make sure all files from
# build step (e.g. dex2oat) were finished writing.
SYNC_BEFORE_RUN="n"
@@ -364,9 +359,6 @@
elif [ "x$1" = "x--random-profile" ]; then
RANDOM_PROFILE="y"
shift
- elif [ "x$1" = "x--set-stack-trace-dump-dir" ]; then
- SET_STACK_TRACE_DUMP_DIR="y"
- shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -375,22 +367,12 @@
fi
done
-mkdir_locations=""
-
if [ "$USE_JVM" = "n" ]; then
FLAGS="${FLAGS} ${ANDROID_FLAGS}"
for feature in ${EXPERIMENTAL}; do
FLAGS="${FLAGS} -Xexperimental:${feature} -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:${feature}"
COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
done
-
- if [ "$SET_STACK_TRACE_DUMP_DIR" = "y" ]; then
- # Note that DEX_LOCATION is used as a proxy for tmpdir throughout this
- # file (it will be under the test specific folder).
- mkdir_locations="${mkdir_locations} $DEX_LOCATION/stack_traces"
- FLAGS="${FLAGS} -Xstacktracedir:$DEX_LOCATION/stack_traces"
- ARGS="${ARGS} --stack-trace-dir $DEX_LOCATION/stack_traces"
- fi
fi
if [ "x$1" = "x" ] ; then
@@ -684,7 +666,7 @@
dex2oat_cmdline="true"
vdex_cmdline="true"
dm_cmdline="true"
-mkdir_locations="${mkdir_locations} ${DEX_LOCATION}/dalvik-cache/$ISA"
+mkdir_locations="${DEX_LOCATION}/dalvik-cache/$ISA"
strip_cmdline="true"
sync_cmdline="true"
diff --git a/test/knownfailures.json b/test/knownfailures.json
index e109bf5..f313758 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -412,10 +412,11 @@
".*methodhandle.*",
".*method-handle.*",
".*varhandle.*",
- ".*var-handle.*"
+ ".*var-handle.*",
+ "716-jli-jit-samples"
],
"description": [
- "Tests that use invoke-polymorphic/invoke-custom which is not yet supported by",
+ "Tests for bytecodes introduced after DEX version 037 that are unsupported by",
"dexter/slicer."
],
"bug": "b/37272822",
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index 736abb7..e2833bf 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -243,43 +243,7 @@
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_SUPER:
case Instruction::INVOKE_VIRTUAL: {
- VeriMethod method = resolver_->GetMethod(instruction.VRegB_35c());
- uint32_t args[5];
- instruction.GetVarArgs(args);
- if (method == VeriClass::forName_) {
- RegisterValue value = GetRegister(args[0]);
- last_result_ = RegisterValue(
- value.GetSource(), value.GetDexFileReference(), VeriClass::class_);
- } else if (IsGetField(method)) {
- RegisterValue cls = GetRegister(args[0]);
- RegisterValue name = GetRegister(args[1]);
- field_uses_.push_back(std::make_pair(cls, name));
- last_result_ = GetReturnType(instruction.VRegB_35c());
- } else if (IsGetMethod(method)) {
- RegisterValue cls = GetRegister(args[0]);
- RegisterValue name = GetRegister(args[1]);
- method_uses_.push_back(std::make_pair(cls, name));
- last_result_ = GetReturnType(instruction.VRegB_35c());
- } else if (method == VeriClass::getClass_) {
- RegisterValue obj = GetRegister(args[0]);
- const VeriClass* cls = obj.GetType();
- if (cls != nullptr && cls->GetClassDef() != nullptr) {
- const DexFile::ClassDef* def = cls->GetClassDef();
- last_result_ = RegisterValue(
- RegisterSource::kClass,
- DexFileReference(&resolver_->GetDexFileOf(*cls), def->class_idx_.index_),
- VeriClass::class_);
- } else {
- last_result_ = RegisterValue(
- obj.GetSource(), obj.GetDexFileReference(), VeriClass::class_);
- }
- } else if (method == VeriClass::loadClass_) {
- RegisterValue value = GetRegister(args[1]);
- last_result_ = RegisterValue(
- value.GetSource(), value.GetDexFileReference(), VeriClass::class_);
- } else {
- last_result_ = GetReturnType(instruction.VRegB_35c());
- }
+ last_result_ = AnalyzeInvoke(instruction, /* is_range */ false);
break;
}
@@ -288,7 +252,7 @@
case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_SUPER_RANGE:
case Instruction::INVOKE_VIRTUAL_RANGE: {
- last_result_ = GetReturnType(instruction.VRegB_3rc());
+ last_result_ = AnalyzeInvoke(instruction, /* is_range */ true);
break;
}
@@ -520,6 +484,7 @@
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
+ AnalyzeFieldSet(instruction);
break;
}
@@ -541,6 +506,7 @@
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
+ AnalyzeFieldSet(instruction);
break;
}
@@ -613,7 +579,112 @@
void VeriFlowAnalysis::Run() {
FindBranches();
+ uint32_t number_of_registers = code_item_accessor_.RegistersSize();
+ uint32_t number_of_parameters = code_item_accessor_.InsSize();
+ std::vector<RegisterValue>& initial_values = *dex_registers_[0].get();
+ for (uint32_t i = 0; i < number_of_parameters; ++i) {
+ initial_values[number_of_registers - number_of_parameters + i] = RegisterValue(
+ RegisterSource::kParameter,
+ i,
+ DexFileReference(&resolver_->GetDexFile(), method_id_),
+ nullptr);
+ }
AnalyzeCode();
}
+static uint32_t GetParameterAt(const Instruction& instruction,
+ bool is_range,
+ uint32_t* args,
+ uint32_t index) {
+ return is_range ? instruction.VRegC() + index : args[index];
+}
+
+RegisterValue FlowAnalysisCollector::AnalyzeInvoke(const Instruction& instruction, bool is_range) {
+ uint32_t id = is_range ? instruction.VRegB_3rc() : instruction.VRegB_35c();
+ VeriMethod method = resolver_->GetMethod(id);
+ uint32_t args[5];
+ if (!is_range) {
+ instruction.GetVarArgs(args);
+ }
+
+ if (method == VeriClass::forName_) {
+ // Class.forName. Fetch the first parameter.
+ RegisterValue value = GetRegister(GetParameterAt(instruction, is_range, args, 0));
+ return RegisterValue(
+ value.GetSource(), value.GetDexFileReference(), VeriClass::class_);
+ } else if (IsGetField(method)) {
+ // Class.getField or Class.getDeclaredField. Fetch the first parameter for the class, and the
+ // second parameter for the field name.
+ RegisterValue cls = GetRegister(GetParameterAt(instruction, is_range, args, 0));
+ RegisterValue name = GetRegister(GetParameterAt(instruction, is_range, args, 1));
+ uses_.push_back(ReflectAccessInfo(cls, name, /* is_method */ false));
+ return GetReturnType(id);
+ } else if (IsGetMethod(method)) {
+ // Class.getMethod or Class.getDeclaredMethod. Fetch the first parameter for the class, and the
+ // second parameter for the method name.
+ RegisterValue cls = GetRegister(GetParameterAt(instruction, is_range, args, 0));
+ RegisterValue name = GetRegister(GetParameterAt(instruction, is_range, args, 1));
+ uses_.push_back(ReflectAccessInfo(cls, name, /* is_method */ true));
+ return GetReturnType(id);
+ } else if (method == VeriClass::getClass_) {
+ // Get the type of the first parameter.
+ RegisterValue obj = GetRegister(GetParameterAt(instruction, is_range, args, 0));
+ const VeriClass* cls = obj.GetType();
+ if (cls != nullptr && cls->GetClassDef() != nullptr) {
+ const DexFile::ClassDef* def = cls->GetClassDef();
+ return RegisterValue(
+ RegisterSource::kClass,
+ DexFileReference(&resolver_->GetDexFileOf(*cls), def->class_idx_.index_),
+ VeriClass::class_);
+ } else {
+ return RegisterValue(
+ obj.GetSource(), obj.GetDexFileReference(), VeriClass::class_);
+ }
+ } else if (method == VeriClass::loadClass_) {
+ // ClassLoader.loadClass. Fetch the first parameter.
+ RegisterValue value = GetRegister(GetParameterAt(instruction, is_range, args, 1));
+ return RegisterValue(
+ value.GetSource(), value.GetDexFileReference(), VeriClass::class_);
+ } else {
+ // Return a RegisterValue referencing the method whose type is the return type
+ // of the method.
+ return GetReturnType(id);
+ }
+}
+
+void FlowAnalysisCollector::AnalyzeFieldSet(const Instruction& instruction ATTRIBUTE_UNUSED) {
+ // There are no fields that escape reflection uses.
+}
+
+RegisterValue FlowAnalysisSubstitutor::AnalyzeInvoke(const Instruction& instruction,
+ bool is_range) {
+ uint32_t id = is_range ? instruction.VRegB_3rc() : instruction.VRegB_35c();
+ MethodReference method(&resolver_->GetDexFile(), id);
+ // TODO: doesn't work for multidex
+ // TODO: doesn't work for overriding (but maybe should be done at a higher level);
+ if (accesses_.find(method) == accesses_.end()) {
+ return GetReturnType(id);
+ }
+ uint32_t args[5];
+ if (!is_range) {
+ instruction.GetVarArgs(args);
+ }
+ for (const ReflectAccessInfo& info : accesses_.at(method)) {
+ if (info.cls.IsParameter() || info.name.IsParameter()) {
+ RegisterValue cls = info.cls.IsParameter()
+ ? GetRegister(GetParameterAt(instruction, is_range, args, info.cls.GetParameterIndex()))
+ : info.cls;
+ RegisterValue name = info.name.IsParameter()
+ ? GetRegister(GetParameterAt(instruction, is_range, args, info.name.GetParameterIndex()))
+ : info.name;
+ uses_.push_back(ReflectAccessInfo(cls, name, info.is_method));
+ }
+ }
+ return GetReturnType(id);
+}
+
+void FlowAnalysisSubstitutor::AnalyzeFieldSet(const Instruction& instruction ATTRIBUTE_UNUSED) {
+ // TODO: analyze field sets.
+}
+
} // namespace art
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 80ae5fc..62c9916 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -21,13 +21,11 @@
#include "dex/dex_file_reference.h"
#include "dex/method_reference.h"
#include "hidden_api.h"
+#include "resolver.h"
#include "veridex.h"
namespace art {
-class VeridexClass;
-class VeridexResolver;
-
/**
* The source where a dex register comes from.
*/
@@ -45,13 +43,29 @@
*/
class RegisterValue {
public:
- RegisterValue() : source_(RegisterSource::kNone), reference_(nullptr, 0), type_(nullptr) {}
+ RegisterValue() : source_(RegisterSource::kNone),
+ parameter_index_(0),
+ reference_(nullptr, 0),
+ type_(nullptr) {}
RegisterValue(RegisterSource source, DexFileReference reference, const VeriClass* type)
- : source_(source), reference_(reference), type_(type) {}
+ : source_(source), parameter_index_(0), reference_(reference), type_(type) {}
+
+ RegisterValue(RegisterSource source,
+ uint32_t parameter_index,
+ DexFileReference reference,
+ const VeriClass* type)
+ : source_(source), parameter_index_(parameter_index), reference_(reference), type_(type) {}
RegisterSource GetSource() const { return source_; }
DexFileReference GetDexFileReference() const { return reference_; }
const VeriClass* GetType() const { return type_; }
+ uint32_t GetParameterIndex() const {
+ CHECK(IsParameter());
+ return parameter_index_;
+ }
+ bool IsParameter() const { return source_ == RegisterSource::kParameter; }
+ bool IsClass() const { return source_ == RegisterSource::kClass; }
+ bool IsString() const { return source_ == RegisterSource::kString; }
std::string ToString() const {
switch (source_) {
@@ -68,6 +82,8 @@
}
case RegisterSource::kClass:
return reference_.dex_file->StringByTypeIdx(dex::TypeIndex(reference_.index));
+ case RegisterSource::kParameter:
+ return std::string("Parameter of ") + reference_.dex_file->PrettyMethod(reference_.index);
default:
return "<unknown>";
}
@@ -75,6 +91,7 @@
private:
RegisterSource source_;
+ uint32_t parameter_index_;
DexFileReference reference_;
const VeriClass* type_;
};
@@ -85,22 +102,18 @@
class VeriFlowAnalysis {
public:
- VeriFlowAnalysis(VeridexResolver* resolver,
- const CodeItemDataAccessor& code_item_accessor)
+ VeriFlowAnalysis(VeridexResolver* resolver, const ClassDataItemIterator& it)
: resolver_(resolver),
- code_item_accessor_(code_item_accessor),
- dex_registers_(code_item_accessor.InsnsSizeInCodeUnits()),
- instruction_infos_(code_item_accessor.InsnsSizeInCodeUnits()) {}
+ method_id_(it.GetMemberIndex()),
+ code_item_accessor_(resolver->GetDexFile(), it.GetMethodCodeItem()),
+ dex_registers_(code_item_accessor_.InsnsSizeInCodeUnits()),
+ instruction_infos_(code_item_accessor_.InsnsSizeInCodeUnits()) {}
void Run();
- const std::vector<std::pair<RegisterValue, RegisterValue>>& GetFieldUses() const {
- return field_uses_;
- }
-
- const std::vector<std::pair<RegisterValue, RegisterValue>>& GetMethodUses() const {
- return method_uses_;
- }
+ virtual RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) = 0;
+ virtual void AnalyzeFieldSet(const Instruction& instruction) = 0;
+ virtual ~VeriFlowAnalysis() {}
private:
// Find all branches in the code.
@@ -124,14 +137,19 @@
uint32_t dex_register, RegisterSource kind, VeriClass* cls, uint32_t source_id);
void UpdateRegister(uint32_t dex_register, const RegisterValue& value);
void UpdateRegister(uint32_t dex_register, const VeriClass* cls);
- const RegisterValue& GetRegister(uint32_t dex_register);
void ProcessDexInstruction(const Instruction& inst);
void SetVisited(uint32_t dex_pc);
- RegisterValue GetReturnType(uint32_t method_index);
RegisterValue GetFieldType(uint32_t field_index);
+ protected:
+ const RegisterValue& GetRegister(uint32_t dex_register);
+ RegisterValue GetReturnType(uint32_t method_index);
+
VeridexResolver* resolver_;
- const CodeItemDataAccessor& code_item_accessor_;
+
+ private:
+ const uint32_t method_id_;
+ CodeItemDataAccessor code_item_accessor_;
// Vector of register values for all branch targets.
std::vector<std::unique_ptr<std::vector<RegisterValue>>> dex_registers_;
@@ -144,12 +162,59 @@
// The value of invoke instructions, to be fetched when visiting move-result.
RegisterValue last_result_;
+};
- // List of reflection field uses found.
- std::vector<std::pair<RegisterValue, RegisterValue>> field_uses_;
+struct ReflectAccessInfo {
+ RegisterValue cls;
+ RegisterValue name;
+ bool is_method;
- // List of reflection method uses found.
- std::vector<std::pair<RegisterValue, RegisterValue>> method_uses_;
+ ReflectAccessInfo(RegisterValue c, RegisterValue n, bool m) : cls(c), name(n), is_method(m) {}
+
+ bool IsConcrete() const {
+ // We capture RegisterSource::kString for the class, for example in Class.forName.
+ return (cls.IsClass() || cls.IsString()) && name.IsString();
+ }
+};
+
+// Collects all reflection uses.
+class FlowAnalysisCollector : public VeriFlowAnalysis {
+ public:
+ FlowAnalysisCollector(VeridexResolver* resolver, const ClassDataItemIterator& it)
+ : VeriFlowAnalysis(resolver, it) {}
+
+ const std::vector<ReflectAccessInfo>& GetUses() const {
+ return uses_;
+ }
+
+ RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
+ void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+
+ private:
+ // List of reflection uses found, concrete and abstract.
+ std::vector<ReflectAccessInfo> uses_;
+};
+
+// Substitutes reflection uses by new ones.
+class FlowAnalysisSubstitutor : public VeriFlowAnalysis {
+ public:
+ FlowAnalysisSubstitutor(VeridexResolver* resolver,
+ const ClassDataItemIterator& it,
+ const std::map<MethodReference, std::vector<ReflectAccessInfo>>& accesses)
+ : VeriFlowAnalysis(resolver, it), accesses_(accesses) {}
+
+ const std::vector<ReflectAccessInfo>& GetUses() const {
+ return uses_;
+ }
+
+ RegisterValue AnalyzeInvoke(const Instruction& instruction, bool is_range) OVERRIDE;
+ void AnalyzeFieldSet(const Instruction& instruction) OVERRIDE;
+
+ private:
+ // List of reflection uses found, concrete and abstract.
+ std::vector<ReflectAccessInfo> uses_;
+ // The abstract uses we are trying to substitute.
+ const std::map<MethodReference, std::vector<ReflectAccessInfo>>& accesses_;
};
} // namespace art
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 4ae5769..89754c2 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -29,7 +29,9 @@
namespace art {
-void PreciseHiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers) {
+void PreciseHiddenApiFinder::RunInternal(
+ const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
+ const std::function<void(VeridexResolver*, const ClassDataItemIterator&)>& action) {
for (const std::unique_ptr<VeridexResolver>& resolver : resolvers) {
const DexFile& dex_file = resolver->GetDexFile();
size_t class_def_count = dex_file.NumClassDefs();
@@ -47,43 +49,67 @@
if (code_item == nullptr) {
continue;
}
- CodeItemDataAccessor code_item_accessor(dex_file, code_item);
- VeriFlowAnalysis ana(resolver.get(), code_item_accessor);
- ana.Run();
- if (!ana.GetFieldUses().empty()) {
- field_uses_[MethodReference(&dex_file, it.GetMemberIndex())] = ana.GetFieldUses();
- }
- if (!ana.GetMethodUses().empty()) {
- method_uses_[MethodReference(&dex_file, it.GetMemberIndex())] = ana.GetMethodUses();
- }
+ action(resolver.get(), it);
}
}
}
}
+void PreciseHiddenApiFinder::AddUsesAt(const std::vector<ReflectAccessInfo>& accesses,
+ MethodReference ref) {
+ for (const ReflectAccessInfo& info : accesses) {
+ if (info.IsConcrete()) {
+ concrete_uses_[ref].push_back(info);
+ } else {
+ abstract_uses_[ref].push_back(info);
+ }
+ }
+}
+
+void PreciseHiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers) {
+ // Collect reflection uses.
+ RunInternal(resolvers, [this] (VeridexResolver* resolver, const ClassDataItemIterator& it) {
+ FlowAnalysisCollector collector(resolver, it);
+ collector.Run();
+ AddUsesAt(collector.GetUses(), MethodReference(&resolver->GetDexFile(), it.GetMemberIndex()));
+ });
+
+ // For non-final reflection uses, do a limited fixed point calculation over the code to try
+ // substituting them with final reflection uses.
+ // We limit the number of times we iterate over the code as one run can be long.
+ static const int kMaximumIterations = 10;
+ uint32_t i = 0;
+ while (!abstract_uses_.empty() && (i++ < kMaximumIterations)) {
+ // Fetch and clear the worklist.
+ std::map<MethodReference, std::vector<ReflectAccessInfo>> current_uses
+ = std::move(abstract_uses_);
+ RunInternal(resolvers,
+ [this, current_uses] (VeridexResolver* resolver, const ClassDataItemIterator& it) {
+ FlowAnalysisSubstitutor substitutor(resolver, it, current_uses);
+ substitutor.Run();
+ AddUsesAt(substitutor.GetUses(),
+ MethodReference(&resolver->GetDexFile(), it.GetMemberIndex()));
+ });
+ }
+}
+
void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
static const char* kPrefix = " ";
- std::map<std::string, std::vector<MethodReference>> uses;
- for (auto kinds : { field_uses_, method_uses_ }) {
- for (auto it : kinds) {
- MethodReference ref = it.first;
- for (const std::pair<RegisterValue, RegisterValue>& info : it.second) {
- if ((info.first.GetSource() == RegisterSource::kClass ||
- info.first.GetSource() == RegisterSource::kString) &&
- info.second.GetSource() == RegisterSource::kString) {
- std::string cls(info.first.ToString());
- std::string name(info.second.ToString());
- std::string full_name = cls + "->" + name;
- HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
- if (api_list != HiddenApiAccessFlags::kWhitelist) {
- uses[full_name].push_back(ref);
- }
- }
+ std::map<std::string, std::vector<MethodReference>> named_uses;
+ for (auto it : concrete_uses_) {
+ MethodReference ref = it.first;
+ for (const ReflectAccessInfo& info : it.second) {
+ std::string cls(info.cls.ToString());
+ std::string name(info.name.ToString());
+ std::string full_name = cls + "->" + name;
+ HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
+ if (api_list != HiddenApiAccessFlags::kWhitelist) {
+ named_uses[full_name].push_back(ref);
}
}
}
- for (auto it : uses) {
+ for (auto it : named_uses) {
++stats->reflection_count;
const std::string& full_name = it.first;
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
diff --git a/tools/veridex/precise_hidden_api_finder.h b/tools/veridex/precise_hidden_api_finder.h
index 22744a6..1c4d0ae 100644
--- a/tools/veridex/precise_hidden_api_finder.h
+++ b/tools/veridex/precise_hidden_api_finder.h
@@ -45,9 +45,18 @@
void Dump(std::ostream& os, HiddenApiStats* stats);
private:
+ // Run over all methods of all dex files, and call `action` on each.
+ void RunInternal(
+ const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
+ const std::function<void(VeridexResolver*, const ClassDataItemIterator&)>& action);
+
+ // Add uses found in method `ref`.
+ void AddUsesAt(const std::vector<ReflectAccessInfo>& accesses, MethodReference ref);
+
const HiddenApi& hidden_api_;
- std::map<MethodReference, std::vector<std::pair<RegisterValue, RegisterValue>>> field_uses_;
- std::map<MethodReference, std::vector<std::pair<RegisterValue, RegisterValue>>> method_uses_;
+
+ std::map<MethodReference, std::vector<ReflectAccessInfo>> concrete_uses_;
+ std::map<MethodReference, std::vector<ReflectAccessInfo>> abstract_uses_;
};
} // namespace art