Cache method lowering info in mir graph.
This should enable easy inlining checks. It should also
improve compilation time for methods that call the same
methods over and over; it is exactly such methods that
tend to exceed our 100ms time limit.
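
A backend can then look up the cached info per call site instead
of recomputing it, roughly as follows (a sketch using the accessors
introduced below):

  const MirMethodLoweringInfo& info = mir_graph_->GetMethodLoweringInfo(mir);
  if (info.FastPath()) {
    // Lower using info.GetSharpType(), info.VTableIndex(),
    // info.DirectCode() and info.DirectMethod().
  }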
Change-Id: If01cd18e039071a74a1444570283c153429c9cd4
diff --git a/compiler/Android.mk b/compiler/Android.mk
index fdc8540..48e2bcd 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -51,6 +51,7 @@
dex/dex_to_dex_compiler.cc \
dex/mir_dataflow.cc \
dex/mir_field_info.cc \
+ dex/mir_method_info.cc \
dex/mir_optimization.cc \
dex/pass_driver.cc \
dex/bb_optimizations.cc \
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index bd7c40b..1a90ca8 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -37,6 +37,20 @@
};
/**
+ * @class CacheMethodLoweringInfo
+ * @brief Cache the lowering info for methods called by INVOKEs.
+ */
+class CacheMethodLoweringInfo : public Pass {
+ public:
+ CacheMethodLoweringInfo() : Pass("CacheMethodLoweringInfo", kNoNodes) {
+ }
+
+ void Start(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->DoCacheMethodLoweringInfo();
+ }
+};
+
+/**
* @class CodeLayout
* @brief Perform the code layout pass.
*/
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 5314bb7..b96c40d 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -19,6 +19,7 @@
#include "dataflow_iterator-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
+#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
@@ -1168,6 +1169,121 @@
}
}
+void MIRGraph::DoCacheMethodLoweringInfo() {
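+  // Collect the unique (method index, invoke type, devirtualization target) keys of all
+  // INVOKEs in this method, assign each key a lowering info index, and resolve the method
+  // lowering info for all keys in one batch at the end.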
+ static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
+
+ // Embed the map value in the entry to avoid extra padding in 64-bit builds.
+ struct MapEntry {
+ // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
+ const MethodReference* devirt_target;
+ uint16_t target_method_idx;
+ uint16_t invoke_type;
+ // Map value.
+ uint32_t lowering_info_index;
+ };
+
+  // Sort INVOKEs by method index, then by invoke type, then by devirtualization target.
+ struct MapEntryComparator {
+ bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
+ if (lhs.target_method_idx != rhs.target_method_idx) {
+ return lhs.target_method_idx < rhs.target_method_idx;
+ }
+ if (lhs.invoke_type != rhs.invoke_type) {
+ return lhs.invoke_type < rhs.invoke_type;
+ }
+ if (lhs.devirt_target != rhs.devirt_target) {
+ if (lhs.devirt_target == nullptr) {
+ return true;
+ }
+ if (rhs.devirt_target == nullptr) {
+ return false;
+ }
+ return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
+ }
+ return false;
+ }
+ MethodReferenceComparator devirt_cmp;
+ };
+
+ // Map invoke key (see MapEntry) to lowering info index.
+ typedef std::set<MapEntry, MapEntryComparator, ScopedArenaAllocatorAdapter<MapEntry> > InvokeMap;
+
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
+
+ // All INVOKE instructions take 3 code units and there must also be a RETURN.
+ uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;
+
+ // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
+ // multi_index_container with one ordered index and one sequential index.
+ InvokeMap invoke_map(MapEntryComparator(), allocator.Adapter());
+ const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
+ allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));
+
+ // Find INVOKE insns and their devirtualization targets.
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->block_type != kDalvikByteCode) {
+ continue;
+ }
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
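+      // RETURN_VOID_BARRIER is the only non-INVOKE opcode in the range from INVOKE_VIRTUAL
+      // to INVOKE_INTERFACE_RANGE, hence the explicit exclusion below.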
+ if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
+ mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
+ mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
+ // Decode target method index and invoke type.
+ const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+ uint16_t target_method_idx;
+ uint16_t invoke_type_idx;
+ if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
+ target_method_idx = insn->VRegB_35c();
+ invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
+ } else {
+ target_method_idx = insn->VRegB_3rc();
+ invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
+ }
+
+ // Find devirtualization target.
+ // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
+ // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
+ // and increment it as needed instead of making O(log n) lookups.
+ const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
+ const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
+
+ // Try to insert a new entry. If the insertion fails, we will have found an old one.
+ MapEntry entry = {
+ devirt_target,
+ target_method_idx,
+ invoke_types[invoke_type_idx],
+ static_cast<uint32_t>(invoke_map.size())
+ };
+ auto it = invoke_map.insert(entry).first; // Iterator to either the old or the new entry.
+ mir->meta.method_lowering_info = it->lowering_info_index;
+        // If the insertion failed, this overwrites the slot with the pointer it already holds.
+ sequential_entries[it->lowering_info_index] = &*it;
+ }
+ }
+ }
+
+ if (invoke_map.empty()) {
+ return;
+ }
+
+ // Prepare unique method infos, set method info indexes for their MIRs.
+ DCHECK_EQ(method_lowering_infos_.Size(), 0u);
+ const size_t count = invoke_map.size();
+ method_lowering_infos_.Resize(count);
+ for (size_t pos = 0u; pos != count; ++pos) {
+ const MapEntry* entry = sequential_entries[pos];
+ MirMethodLoweringInfo method_info(entry->target_method_idx,
+ static_cast<InvokeType>(entry->invoke_type));
+ if (entry->devirt_target != nullptr) {
+ method_info.SetDevirtualizationTarget(*entry->devirt_target);
+ }
+ method_lowering_infos_.Insert(method_info);
+ }
+ MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ method_lowering_infos_.GetRawStorage(), count);
+}
+
bool MIRGraph::SkipCompilation(const std::string& methodname) {
return cu_->compiler_driver->SkipCompilation(methodname);
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 868730f..0b50e2f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -89,7 +89,8 @@
max_available_non_special_compiler_temps_(0),
punt_to_interpreter_(false),
ifield_lowering_infos_(arena, 0u),
- sfield_lowering_infos_(arena, 0u) {
+ sfield_lowering_infos_(arena, 0u),
+ method_lowering_infos_(arena, 0u) {
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
- std::abs(static_cast<int>(kVRegTempBaseReg));
@@ -1176,6 +1177,7 @@
info->is_range = is_range;
info->index = mir->dalvikInsn.vB;
info->offset = mir->offset;
+ info->mir = mir;
return info;
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 94b3816..8a33414 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -20,8 +20,9 @@
#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
-#include "mir_field_info.h"
#include "invoke_type.h"
+#include "mir_field_info.h"
+#include "mir_method_info.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
#include "reg_storage.h"
@@ -267,6 +268,8 @@
// SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
// the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
uint32_t sfield_lowering_info;
+ // INVOKE data index, points to MIRGraph::method_lowering_infos_.
+ uint32_t method_lowering_info;
} meta;
};
@@ -365,6 +368,7 @@
bool skip_this;
bool is_range;
DexOffset offset; // Offset in code units.
+ MIR* mir;
};
@@ -491,6 +495,13 @@
return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
}
+ void DoCacheMethodLoweringInfo();
+
+ const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.Size());
+ return method_lowering_infos_.GetRawStorage()[mir->meta.method_lowering_info];
+ }
+
void InitRegLocations();
void RemapRegLocations();
@@ -950,6 +961,7 @@
bool punt_to_interpreter_; // Difficult or not worthwhile - just interpret.
GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
+ GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
new file mode 100644
index 0000000..4580e76
--- /dev/null
+++ b/compiler/dex/mir_method_info.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_method_info.h"
+
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "driver/compiler_driver-inl.h"
+#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+
+namespace art {
+
+void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirMethodLoweringInfo* method_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(method_infos != nullptr);
+ DCHECK_NE(count, 0u);
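+    // Check that each info is still in the unresolved state produced by the constructor,
+    // with at most the devirtualization target set.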
+ for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+ MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType());
+ if (it->target_dex_file_ != nullptr) {
+ unresolved.target_dex_file_ = it->target_dex_file_;
+ unresolved.target_method_idx_ = it->target_method_idx_;
+ }
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ // We're going to resolve methods and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+  // Even if the referrer class is unresolved (i.e. we're compiling a method without a
+  // class definition), we still want to resolve methods and record all available info.
+
+ for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+ // Remember devirtualized invoke target and set the called method to the default.
+ MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
+ MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
+ it->target_dex_file_ = mUnit->GetDexFile();
+ it->target_method_idx_ = it->MethodIndex();
+
+ InvokeType invoke_type = it->GetInvokeType();
+ mirror::ArtMethod* resolved_method =
+ compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit, it->MethodIndex(),
+ invoke_type);
+ if (UNLIKELY(resolved_method == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
+ it->vtable_idx_ = compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+
+ MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
+ int fast_path_flags = compiler_driver->IsFastInvoke(
+ soa, dex_cache, class_loader, mUnit, referrer_class.get(), resolved_method, &invoke_type,
+ &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
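+    // Preserve the other flags; the fast path flag and the sharp type are recomputed below.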
+ uint16_t other_flags = it->flags_ & ~kFlagFastPath & ~(kInvokeTypeMask << kBitSharpTypeBegin);
+ it->flags_ = other_flags |
+ (fast_path_flags != 0 ? kFlagFastPath : 0u) |
+ (static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin);
+ it->target_dex_file_ = target_method.dex_file;
+ it->target_method_idx_ = target_method.dex_method_index;
+ it->stats_flags_ = fast_path_flags;
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
new file mode 100644
index 0000000..a43238c
--- /dev/null
+++ b/compiler/dex/mir_method_info.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_METHOD_INFO_H_
+#define ART_COMPILER_DEX_MIR_METHOD_INFO_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "invoke_type.h"
+#include "method_reference.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+class DexFile;
+
+class MirMethodInfo {
+ public:
+ uint16_t MethodIndex() const {
+ return method_idx_;
+ }
+
+ bool IsStatic() const {
+ return (flags_ & kFlagIsStatic) != 0u;
+ }
+
+ bool IsResolved() const {
+ return declaring_dex_file_ != nullptr;
+ }
+
+ const DexFile* DeclaringDexFile() const {
+ return declaring_dex_file_;
+ }
+
+ uint16_t DeclaringClassIndex() const {
+ return declaring_class_idx_;
+ }
+
+ uint16_t DeclaringMethodIndex() const {
+ return declaring_method_idx_;
+ }
+
+ protected:
+ enum {
+ kBitIsStatic = 0,
+ kMethodInfoBitEnd
+ };
+ COMPILE_ASSERT(kMethodInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+
+ MirMethodInfo(uint16_t method_idx, uint16_t flags)
+ : method_idx_(method_idx),
+ flags_(flags),
+ declaring_method_idx_(0u),
+ declaring_class_idx_(0u),
+ declaring_dex_file_(nullptr) {
+ }
+
+ // Make copy-ctor/assign/dtor protected to avoid slicing.
+ MirMethodInfo(const MirMethodInfo& other) = default;
+ MirMethodInfo& operator=(const MirMethodInfo& other) = default;
+ ~MirMethodInfo() = default;
+
+ // The method index in the compiling method's dex file.
+ uint16_t method_idx_;
+  // Flags, for the IsStatic() flag and derived class data.
+ uint16_t flags_;
+ // The method index in the dex file that defines the method, 0 if unresolved.
+ uint16_t declaring_method_idx_;
+ // The type index of the class declaring the method, 0 if unresolved.
+ uint16_t declaring_class_idx_;
+ // The dex file that defines the class containing the method and the method,
+ // nullptr if unresolved.
+ const DexFile* declaring_dex_file_;
+};
+
+class MirMethodLoweringInfo : public MirMethodInfo {
+ public:
+ // For each requested method retrieve the method's declaring location (dex file, class
+ // index and method index) and compute whether we can fast path the method call. For fast
+ // path methods, retrieve the method's vtable index and direct code and method when applicable.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirMethodLoweringInfo* method_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
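+  // Constructs an unresolved method info. Initially the sharp type equals the declared
+  // invoke type; Resolve() may later sharpen it, e.g. a kVirtual call to a final method
+  // becomes kDirect.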
+ MirMethodLoweringInfo(uint16_t method_idx, InvokeType type)
+ : MirMethodInfo(method_idx,
+ ((type == kStatic) ? kFlagIsStatic : 0u) |
+ (static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
+ (static_cast<uint16_t>(type) << kBitSharpTypeBegin)),
+ direct_code_(0u),
+ direct_method_(0u),
+ target_dex_file_(nullptr),
+ target_method_idx_(0u),
+ vtable_idx_(0u),
+ stats_flags_(0) {
+ }
+
+ void SetDevirtualizationTarget(const MethodReference& ref) {
+ DCHECK(target_dex_file_ == nullptr);
+ DCHECK_EQ(target_method_idx_, 0u);
+ DCHECK_LE(ref.dex_method_index, 0xffffu);
+ target_dex_file_ = ref.dex_file;
+ target_method_idx_ = ref.dex_method_index;
+ }
+
+ bool FastPath() const {
+ return (flags_ & kFlagFastPath) != 0u;
+ }
+
+ InvokeType GetInvokeType() const {
+ return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
+ }
+
+  InvokeType GetSharpType() const {
+ return static_cast<InvokeType>((flags_ >> kBitSharpTypeBegin) & kInvokeTypeMask);
+ }
+
+ MethodReference GetTargetMethod() const {
+ return MethodReference(target_dex_file_, target_method_idx_);
+ }
+
+ uint16_t VTableIndex() const {
+ return vtable_idx_;
+ }
+
+ uintptr_t DirectCode() const {
+ return direct_code_;
+ }
+
+ uintptr_t DirectMethod() const {
+ return direct_method_;
+ }
+
+ int StatsFlags() const {
+ return stats_flags_;
+ }
+
+ private:
+ enum {
+ kBitFastPath = kMethodInfoBitEnd,
+ kBitInvokeTypeBegin,
+ kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3, // 3 bits for invoke type.
+ kBitSharpTypeBegin,
+ kBitSharpTypeEnd = kBitSharpTypeBegin + 3, // 3 bits for sharp type.
+ kMethodLoweringInfoEnd = kBitSharpTypeEnd
+ };
+ COMPILE_ASSERT(kMethodLoweringInfoEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
+ static constexpr uint16_t kInvokeTypeMask = 7u;
+ COMPILE_ASSERT((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
+ assert_invoke_type_bits_ok);
+ COMPILE_ASSERT((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
+ assert_sharp_type_bits_ok);
+
+ uintptr_t direct_code_;
+ uintptr_t direct_method_;
+ // Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
+ // devirtualized invoke target if available, nullptr and 0u otherwise.
+ // After Resolve() they hold the actual target method that will be called; it will be either
+  // a devirtualized target method or the compilation unit's dex file and MethodIndex().
+ const DexFile* target_dex_file_;
+ uint16_t target_method_idx_;
+ uint16_t vtable_idx_;
+ int stats_flags_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_MIR_METHOD_INFO_H_
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index 256bcb1..291012f 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -92,6 +92,7 @@
*/
static const Pass* const passes[] = {
GetPassInstance<CacheFieldLoweringInfo>(),
+ GetPassInstance<CacheMethodLoweringInfo>(),
GetPassInstance<CodeLayout>(),
GetPassInstance<SSATransformation>(),
GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2f017c8..424cdd6 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -573,33 +573,32 @@
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
int state,
const MethodReference& target_method,
- uint32_t method_idx,
- uintptr_t unused, uintptr_t unused2,
- InvokeType unused3) {
+ uint32_t unused, uintptr_t unused2,
+ uintptr_t unused3, InvokeType unused4) {
ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t unused2, InvokeType unused3) {
+ uint32_t unused, uintptr_t unused2,
+ uintptr_t unused3, InvokeType unused4) {
ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t unused2, InvokeType unused3) {
+ uint32_t unused, uintptr_t unused2,
+ uintptr_t unused3, InvokeType unused4) {
ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t unused2, InvokeType unused3) {
+ uint32_t unused, uintptr_t unused2,
+ uintptr_t unused3, InvokeType unused4) {
ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -607,9 +606,8 @@
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused,
- uintptr_t unused2, uintptr_t unused3,
- InvokeType unused4) {
+ uint32_t unused, uintptr_t unused2,
+ uintptr_t unused3, InvokeType unused4) {
ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -1400,7 +1398,6 @@
return;
}
}
- InvokeType original_type = info->type; // avoiding mutation by ComputeInvokeInfo
int call_state = 0;
LIR* null_ck;
LIR** p_null_ck = NULL;
@@ -1409,19 +1406,12 @@
// Explicit register usage
LockCallTemps();
- DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
- MethodReference target_method(cUnit->GetDexFile(), info->index);
- int vtable_idx;
- uintptr_t direct_code;
- uintptr_t direct_method;
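+  // Use the method lowering info cached by MIRGraph::DoCacheMethodLoweringInfo() instead
+  // of recomputing it with ComputeInvokeInfo() for each invoke.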
+ const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
+ cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
+  InvokeType original_type = method_info.GetInvokeType();
+  info->type = method_info.GetSharpType();
+ bool fast_path = method_info.FastPath();
bool skip_this;
- bool fast_path =
- cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
- current_dalvik_offset_,
- true, true,
- &info->type, &target_method,
- &vtable_idx,
- &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
if (info->type == kInterface) {
next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
skip_this = fast_path;
@@ -1443,29 +1433,29 @@
next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
skip_this = fast_path;
}
+ MethodReference target_method = method_info.GetTargetMethod();
if (!info->is_range) {
call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
- next_call_insn, target_method,
- vtable_idx, direct_code, direct_method,
+ next_call_insn, target_method, method_info.VTableIndex(),
+ method_info.DirectCode(), method_info.DirectMethod(),
original_type, skip_this);
} else {
call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
- next_call_insn, target_method, vtable_idx,
- direct_code, direct_method, original_type,
- skip_this);
+ next_call_insn, target_method, method_info.VTableIndex(),
+ method_info.DirectCode(), method_info.DirectMethod(),
+ original_type, skip_this);
}
// Finish up any of the call sequence not interleaved in arg loading
while (call_state >= 0) {
- call_state = next_call_insn(cu_, info, call_state, target_method,
- vtable_idx, direct_code, direct_method,
- original_type);
+ call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
+ method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_inst;
if (cu_->instruction_set != kX86) {
call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
} else {
if (fast_path) {
- if (direct_code == static_cast<unsigned int>(-1)) {
+ if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
// We can have the linker fixup a call relative.
call_inst =
reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 1499ae4..664f809 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -21,8 +21,11 @@
#include "dex/compiler_ir.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref-inl.h"
@@ -161,6 +164,131 @@
return std::make_pair(false, false);
}
+inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t method_idx, InvokeType invoke_type) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
+ *mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
+ DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(resolved_method == nullptr)) {
+    // Clean up any exception left by method resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+ if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
+ // Silently return nullptr on incompatible class change.
+ return nullptr;
+ }
+ return resolved_method;
+}
+
+inline void CompilerDriver::GetResolvedMethodDexFileLocation(
+ mirror::ArtMethod* resolved_method, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) {
+ mirror::Class* declaring_class = resolved_method->GetDeclaringClass();
+ *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
+ *declaring_class_idx = declaring_class->GetDexTypeIndex();
+ *declaring_method_idx = resolved_method->GetDexMethodIndex();
+}
+
+inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
+ mirror::ArtMethod* resolved_method, InvokeType type) {
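+  // kVirtual and kSuper dispatch through the vtable slot; kInterface uses the dex method
+  // index instead; other invoke types do not need a vtable index.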
+ if (type == kVirtual || type == kSuper) {
+ return resolved_method->GetMethodIndex();
+ } else if (type == kInterface) {
+ return resolved_method->GetDexMethodIndex();
+ } else {
+ return DexFile::kDexNoIndex16;
+ }
+}
+
+inline int CompilerDriver::IsFastInvoke(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
+ MethodReference* target_method, const MethodReference* devirt_target,
+ uintptr_t* direct_code, uintptr_t* direct_method) {
+ // Don't try to fast-path if we don't understand the caller's class.
+ if (UNLIKELY(referrer_class == nullptr)) {
+ return 0;
+ }
+ mirror::Class* methods_class = resolved_method->GetDeclaringClass();
+ if (UNLIKELY(!referrer_class->CanAccessResolvedMethod(methods_class, resolved_method,
+ dex_cache.get(),
+ target_method->dex_method_index))) {
+ return 0;
+ }
+
+ // Sharpen a virtual call into a direct call when the target is known not to have been
+ // overridden (ie is final).
+ bool can_sharpen_virtual_based_on_type =
+ (*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
+ // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
+ // the super class.
+ bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
+ (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
+ resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
+ (methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
+
+ if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
+ // Sharpen a virtual call into a direct call. The method_idx is into referrer's
+ // dex cache, check that this resolved method is where we expect it.
+ CHECK(target_method->dex_file == mUnit->GetDexFile());
+ DCHECK(dex_cache.get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
+ resolved_method) << PrettyMethod(resolved_method);
+ int stats_flags = kFlagMethodResolved;
+ GetCodeAndMethodForDirectCall(invoke_type, kDirect, false, referrer_class, resolved_method,
+ &stats_flags, target_method, direct_code, direct_method);
+ DCHECK_NE(*invoke_type, kSuper) << PrettyMethod(resolved_method);
+ if (*invoke_type == kDirect) {
+ stats_flags |= kFlagsMethodResolvedVirtualMadeDirect;
+ }
+ return stats_flags;
+ }
+
+ if ((*invoke_type == kVirtual || *invoke_type == kInterface) && devirt_target != nullptr) {
+ // Post-verification callback recorded a more precise invoke target based on its type info.
+ mirror::ArtMethod* called_method;
+ ClassLinker* class_linker = mUnit->GetClassLinker();
+ if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) {
+ called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
+ devirt_target->dex_method_index,
+                                                  dex_cache, class_loader, nullptr, kVirtual);
+ } else {
+ SirtRef<mirror::DexCache> target_dex_cache(soa.Self(),
+ class_linker->FindDexCache(*devirt_target->dex_file));
+ called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
+ devirt_target->dex_method_index,
+                                                  target_dex_cache, class_loader, nullptr, kVirtual);
+ }
+    CHECK(called_method != nullptr);
+ CHECK(!called_method->IsAbstract());
+ int stats_flags = kFlagMethodResolved;
+ GetCodeAndMethodForDirectCall(invoke_type, kDirect, true, referrer_class, called_method,
+ &stats_flags, target_method, direct_code, direct_method);
+ DCHECK_NE(*invoke_type, kSuper);
+ if (*invoke_type == kDirect) {
+ stats_flags |= kFlagsMethodResolvedPreciseTypeDevirtualization;
+ }
+ return stats_flags;
+ }
+
+ if (UNLIKELY(*invoke_type == kSuper)) {
+ // Unsharpened super calls are suspicious so go slow-path.
+ return 0;
+ }
+
+ // Sharpening failed so generate a regular resolved method dispatch.
+ int stats_flags = kFlagMethodResolved;
+ GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class, resolved_method,
+ &stats_flags, target_method, direct_code, direct_method);
+ return stats_flags;
+}
+
} // namespace art
#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a46015d..7c4a6f7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -71,7 +71,7 @@
LOG(INFO) << Percentage(x, y) << "% of " << str << " for " << (x + y) << " cases";
}
-class AOTCompilationStats {
+class CompilerDriver::AOTCompilationStats {
public:
AOTCompilationStats()
: stats_lock_("AOT compilation statistics lock"),
@@ -242,6 +242,30 @@
direct_methods_to_boot_[type]++;
}
+ void ProcessedInvoke(InvokeType type, int flags) {
+ STATS_LOCK();
+ if (flags == 0) {
+ unresolved_methods_[type]++;
+ } else {
+ DCHECK_NE((flags & kFlagMethodResolved), 0);
+ resolved_methods_[type]++;
+ if ((flags & kFlagVirtualMadeDirect) != 0) {
+ virtual_made_direct_[type]++;
+ if ((flags & kFlagPreciseTypeDevirtualization) != 0) {
+ type_based_devirtualization_++;
+ }
+ } else {
+ DCHECK_EQ((flags & kFlagPreciseTypeDevirtualization), 0);
+ }
+ if ((flags & kFlagDirectCallToBoot) != 0) {
+ direct_calls_to_boot_[type]++;
+ }
+ if ((flags & kFlagDirectMethodToBoot) != 0) {
+ direct_methods_to_boot_[type]++;
+ }
+ }
+ }
+
// A check-cast could be eliminated due to verifier type analysis.
void SafeCast() {
STATS_LOCK();
@@ -933,32 +957,8 @@
}
}
-static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
- SirtRef<mirror::DexCache>& dex_cache,
- const DexCompilationUnit* mUnit)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // The passed dex_cache is a hint, sanity check before asking the class linker that will take a
- // lock.
- if (dex_cache->GetDexFile() != mUnit->GetDexFile()) {
- dex_cache.reset(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- }
- SirtRef<mirror::ClassLoader>
- class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- const DexFile::MethodId& referrer_method_id =
- mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
- return mUnit->GetClassLinker()->ResolveType(*mUnit->GetDexFile(), referrer_method_id.class_idx_,
- dex_cache, class_loader);
-}
-
-static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
- const DexCompilationUnit* mUnit,
- uint32_t method_idx,
- InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- return mUnit->GetClassLinker()->ResolveMethod(*mUnit->GetDexFile(), method_idx, dex_cache,
- class_loader, NULL, type);
+void CompilerDriver::ProcessedInvoke(InvokeType invoke_type, int flags) {
+ stats_->ProcessedInvoke(invoke_type, flags);
}
bool CompilerDriver::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
@@ -1065,7 +1065,7 @@
bool no_guarantee_of_dex_cache_entry,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
- bool update_stats,
+ int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code,
uintptr_t* direct_method) {
@@ -1103,9 +1103,8 @@
}
}
}
- if (update_stats && method_code_in_boot) {
- stats_->DirectCallsToBoot(*type);
- stats_->DirectMethodsToBoot(*type);
+ if (method_code_in_boot) {
+ *stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
}
if (!use_dex_cache && compiling_boot) {
MethodHelper mh(method);
@@ -1174,110 +1173,63 @@
InvokeType* invoke_type, MethodReference* target_method,
int* vtable_idx, uintptr_t* direct_code,
uintptr_t* direct_method) {
+ InvokeType orig_invoke_type = *invoke_type;
+ int stats_flags = 0;
ScopedObjectAccess soa(Thread::Current());
- *vtable_idx = -1;
- *direct_code = 0;
- *direct_method = 0;
- mirror::ArtMethod* resolved_method =
- ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method->dex_method_index,
- *invoke_type);
- if (resolved_method != NULL) {
- if (*invoke_type == kVirtual || *invoke_type == kSuper) {
- *vtable_idx = resolved_method->GetMethodIndex();
- } else if (*invoke_type == kInterface) {
- *vtable_idx = resolved_method->GetDexMethodIndex();
- }
- // Don't try to fast-path if we don't understand the caller's class or this appears to be an
- // Incompatible Class Change Error.
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), resolved_method->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- bool icce = resolved_method->CheckIncompatibleClassChange(*invoke_type);
- if (referrer_class != NULL && !icce) {
- mirror::Class* methods_class = resolved_method->GetDeclaringClass();
- if (referrer_class->CanAccessResolvedMethod(methods_class, resolved_method, dex_cache.get(),
- target_method->dex_method_index)) {
- const bool enableFinalBasedSharpening = enable_devirtualization;
- // Sharpen a virtual call into a direct call when the target is known not to have been
- // overridden (ie is final).
- bool can_sharpen_virtual_based_on_type =
- (*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
- // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
- // the super class.
- bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
- (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
- resolved_method->GetMethodIndex() < methods_class->GetVTable()->GetLength() &&
- (methods_class->GetVTable()->Get(resolved_method->GetMethodIndex()) == resolved_method);
+  // Try to resolve the method and the compiling method's class.
+ mirror::ArtMethod* resolved_method;
+ mirror::Class* referrer_class;
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ {
+ uint32_t method_idx = target_method->dex_method_index;
+ SirtRef<mirror::ArtMethod> resolved_method_sirt(soa.Self(),
+ ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type));
+ referrer_class = (resolved_method_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
+ resolved_method = resolved_method_sirt.get();
+ }
+ bool result = false;
+ if (resolved_method != nullptr) {
+ *vtable_idx = GetResolvedMethodVTableIndex(resolved_method, orig_invoke_type);
- if (enableFinalBasedSharpening && (can_sharpen_virtual_based_on_type ||
- can_sharpen_super_based_on_type)) {
- // Sharpen a virtual call into a direct call. The method_idx is into the DexCache
- // associated with target_method->dex_file.
- CHECK(target_method->dex_file == mUnit->GetDexFile());
- DCHECK(dex_cache.get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- CHECK(dex_cache->GetResolvedMethod(target_method->dex_method_index) ==
- resolved_method) << PrettyMethod(resolved_method);
- InvokeType orig_invoke_type = *invoke_type;
- GetCodeAndMethodForDirectCall(invoke_type, kDirect, false, referrer_class, resolved_method,
- update_stats, target_method, direct_code, direct_method);
- if (update_stats && (*invoke_type == kDirect)) {
- stats_->ResolvedMethod(orig_invoke_type);
- stats_->VirtualMadeDirect(orig_invoke_type);
- }
- DCHECK_NE(*invoke_type, kSuper) << PrettyMethod(resolved_method);
- return true;
- }
- const bool enableVerifierBasedSharpening = enable_devirtualization;
- if (enableVerifierBasedSharpening && (*invoke_type == kVirtual ||
- *invoke_type == kInterface)) {
- // Did the verifier record a more precise invoke target based on its type information?
- DCHECK(mUnit->GetVerifiedMethod() != nullptr);
- const MethodReference* devirt_map_target =
- mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc);
- if (devirt_map_target != NULL) {
- SirtRef<mirror::DexCache> target_dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- mirror::ArtMethod* called_method =
- mUnit->GetClassLinker()->ResolveMethod(*devirt_map_target->dex_file,
- devirt_map_target->dex_method_index,
- target_dex_cache, class_loader, NULL,
- kVirtual);
- CHECK(called_method != NULL);
- CHECK(!called_method->IsAbstract());
- InvokeType orig_invoke_type = *invoke_type;
- GetCodeAndMethodForDirectCall(invoke_type, kDirect, true, referrer_class, called_method,
- update_stats, target_method, direct_code, direct_method);
- if (update_stats && (*invoke_type == kDirect)) {
- stats_->ResolvedMethod(orig_invoke_type);
- stats_->VirtualMadeDirect(orig_invoke_type);
- stats_->PreciseTypeDevirtualization();
- }
- DCHECK_NE(*invoke_type, kSuper);
- return true;
- }
- }
- if (*invoke_type == kSuper) {
- // Unsharpened super calls are suspicious so go slow-path.
- } else {
- // Sharpening failed so generate a regular resolved method dispatch.
- if (update_stats) {
- stats_->ResolvedMethod(*invoke_type);
- }
- GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class, resolved_method,
- update_stats, target_method, direct_code, direct_method);
- return true;
- }
+ if (enable_devirtualization) {
+ DCHECK(mUnit->GetVerifiedMethod() != nullptr);
+ const MethodReference* devirt_target = mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc);
+
+ stats_flags = IsFastInvoke(
+ soa, dex_cache, class_loader, mUnit, referrer_class, resolved_method,
+ invoke_type, target_method, devirt_target, direct_code, direct_method);
+ result = stats_flags != 0;
+ } else {
+ // Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts.
+ if (UNLIKELY(referrer_class == nullptr) ||
+ UNLIKELY(!referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
+ resolved_method, dex_cache.get(),
+ target_method->dex_method_index)) ||
+ *invoke_type == kSuper) {
+ // Slow path. (Without devirtualization, all super calls go slow path as well.)
+ } else {
+ // Sharpening failed so generate a regular resolved method dispatch.
+ stats_flags = kFlagMethodResolved;
+        GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class,
+                                      resolved_method, &stats_flags, target_method,
+                                      direct_code, direct_method);
+ result = true;
}
}
}
- // Clean up any exception left by method/invoke_type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+ if (!result) {
+ // Conservative defaults.
+ *vtable_idx = -1;
+ *direct_code = 0u;
+ *direct_method = 0u;
}
if (update_stats) {
- stats_->UnresolvedMethod(*invoke_type);
+ ProcessedInvoke(orig_invoke_type, stats_flags);
}
- return false; // Incomplete knowledge needs slow path.
+ return result;
}
const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
@@ -1297,7 +1249,6 @@
return result;
}
-
void CompilerDriver::AddCodePatch(const DexFile* dex_file,
uint16_t referrer_class_def_idx,
uint32_t referrer_method_idx,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 817da17..26210c9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -44,7 +44,6 @@
class MethodVerifier;
} // namespace verifier
-class AOTCompilationStats;
class CompilerOptions;
class DexCompilationUnit;
class DexFileToMethodInlinerMap;
@@ -256,8 +255,37 @@
uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Resolve a method. Returns nullptr on failure, including incompatible class change.
+ mirror::ArtMethod* ResolveMethod(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t method_idx, InvokeType invoke_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Get the declaration location (dex file, class index, method index) of a resolved method.
+ void GetResolvedMethodDexFileLocation(
+ mirror::ArtMethod* resolved_method, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Get the vtable index of a resolved method.
+ uint16_t GetResolvedMethodVTableIndex(
+ mirror::ArtMethod* resolved_method, InvokeType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
+ // for ProcessedInvoke() and computes the necessary lowering info.
+ int IsFastInvoke(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
+ MethodReference* target_method, const MethodReference* devirt_target,
+ uintptr_t* direct_code, uintptr_t* direct_method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void ProcessedInstanceField(bool resolved);
void ProcessedStaticField(bool resolved, bool local);
+ void ProcessedInvoke(InvokeType invoke_type, int flags);
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
@@ -594,16 +622,37 @@
bool SkipCompilation(const std::string& method_name);
private:
- // Compute constant code and method pointers when possible
+ // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
+  // The only external contract is that unresolved methods have flags 0 and resolved ones non-zero.
+ enum {
+ kBitMethodResolved = 0,
+ kBitVirtualMadeDirect,
+ kBitPreciseTypeDevirtualization,
+ kBitDirectCallToBoot,
+ kBitDirectMethodToBoot
+ };
+ static constexpr int kFlagMethodResolved = 1 << kBitMethodResolved;
+ static constexpr int kFlagVirtualMadeDirect = 1 << kBitVirtualMadeDirect;
+ static constexpr int kFlagPreciseTypeDevirtualization = 1 << kBitPreciseTypeDevirtualization;
+ static constexpr int kFlagDirectCallToBoot = 1 << kBitDirectCallToBoot;
+ static constexpr int kFlagDirectMethodToBoot = 1 << kBitDirectMethodToBoot;
+ static constexpr int kFlagsMethodResolvedVirtualMadeDirect =
+ kFlagMethodResolved | kFlagVirtualMadeDirect;
+ static constexpr int kFlagsMethodResolvedPreciseTypeDevirtualization =
+ kFlagsMethodResolvedVirtualMadeDirect | kFlagPreciseTypeDevirtualization;
+
+ public:  // TODO: Make this private or eliminate it.
+ // Compute constant code and method pointers when possible.
void GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
bool no_guarantee_of_dex_cache_entry,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
- bool update_stats,
+ int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -688,6 +737,7 @@
size_t thread_count_;
uint64_t start_ns_;
+ class AOTCompilationStats;
UniquePtr<AOTCompilationStats> stats_;
bool dump_stats_;