Merge "Move inline caches GC handling in JitCodeCache."
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 16a465a..01e89bb 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -258,6 +258,40 @@
ProfilingInfo* const profiling_info_;
};
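+// The helpers below interpret an inline cache that has been copied into a
+// handle-scope class array by JitCodeCache::CopyInlineCacheInto. Entries are
+// filled from index 0 with no null gaps, so a null entry marks the end of the
+// recorded receiver types.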
+static bool IsMonomorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_GE(InlineCache::kIndividualCacheSize, 2);
+ return classes->Get(0) != nullptr && classes->Get(1) == nullptr;
+}
+
+static bool IsMegamorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
+ if (classes->Get(i) == nullptr) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static mirror::Class* GetMonomorphicType(Handle<mirror::ObjectArray<mirror::Class>> classes)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(classes->Get(0) != nullptr);
+ return classes->Get(0);
+}
+
+static bool IsUninitialized(Handle<mirror::ObjectArray<mirror::Class>> classes)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return classes->Get(0) == nullptr;
+}
+
+static bool IsPolymorphic(Handle<mirror::ObjectArray<mirror::Class>> classes)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_GE(InlineCache::kIndividualCacheSize, 3);
+ return classes->Get(1) != nullptr &&
+ classes->Get(InlineCache::kIndividualCacheSize - 1) == nullptr;
+}
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved()) {
return false; // Don't bother to move further if we know the method is unresolved.
@@ -301,31 +335,48 @@
ScopedProfilingInfoInlineUse spiis(caller, soa.Self());
ProfilingInfo* profiling_info = spiis.GetProfilingInfo();
if (profiling_info != nullptr) {
- const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
- if (ic.IsUninitialized()) {
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(method_index)
- << " is not hit and not inlined";
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
+ Handle<mirror::ObjectArray<mirror::Class>> inline_cache = hs.NewHandle(
+ mirror::ObjectArray<mirror::Class>::Alloc(
+ soa.Self(),
+ class_linker->GetClassRoot(ClassLinker::kClassArrayClass),
+ InlineCache::kIndividualCacheSize));
+ if (inline_cache.Get() == nullptr) {
+ // We got an OOME. Just clear the exception, and don't inline.
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ VLOG(compiler) << "Out of memory in the compiler when trying to inline";
return false;
- } else if (ic.IsMonomorphic()) {
- MaybeRecordStat(kMonomorphicCall);
- if (outermost_graph_->IsCompilingOsr()) {
- // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
- // interpreter and it may have seen different receiver types.
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
- } else {
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
- }
- } else if (ic.IsPolymorphic()) {
- MaybeRecordStat(kPolymorphicCall);
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
} else {
- DCHECK(ic.IsMegamorphic());
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(method_index)
- << " is megamorphic and not inlined";
- MaybeRecordStat(kMegamorphicCall);
- return false;
+ Runtime::Current()->GetJit()->GetCodeCache()->CopyInlineCacheInto(
+ *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()),
+ inline_cache);
+ if (IsUninitialized(inline_cache)) {
+ VLOG(compiler) << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(method_index)
+ << " is not hit and not inlined";
+ return false;
+ } else if (IsMonomorphic(inline_cache)) {
+ MaybeRecordStat(kMonomorphicCall);
+ if (outermost_graph_->IsCompilingOsr()) {
+ // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
+ // interpreter and it may have seen different receiver types.
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+ } else {
+ return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
+ }
+ } else if (IsPolymorphic(inline_cache)) {
+ MaybeRecordStat(kPolymorphicCall);
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+ } else {
+ DCHECK(IsMegamorphic(inline_cache));
+ VLOG(compiler) << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(method_index)
+ << " is megamorphic and not inlined";
+ MaybeRecordStat(kMegamorphicCall);
+ return false;
+ }
}
}
}
@@ -358,13 +409,13 @@
bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
- const InlineCache& ic) {
+ Handle<mirror::ObjectArray<mirror::Class>> classes) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
dex::TypeIndex class_index = FindClassIndexIn(
- ic.GetMonomorphicType(), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ GetMonomorphicType(classes), caller_dex_file, caller_compilation_unit_.GetDexCache());
if (!class_index.IsValid()) {
VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
@@ -375,11 +426,11 @@
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
PointerSize pointer_size = class_linker->GetImagePointerSize();
if (invoke_instruction->IsInvokeInterface()) {
- resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForInterface(
+ resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForInterface(
resolved_method, pointer_size);
} else {
DCHECK(invoke_instruction->IsInvokeVirtual());
- resolved_method = ic.GetMonomorphicType()->FindVirtualMethodForVirtual(
+ resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForVirtual(
resolved_method, pointer_size);
}
DCHECK(resolved_method != nullptr);
@@ -393,7 +444,7 @@
// We successfully inlined, now add a guard.
bool is_referrer =
- (ic.GetMonomorphicType() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ (GetMonomorphicType(classes) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
AddTypeGuard(receiver,
cursor,
bb_cursor,
@@ -457,11 +508,11 @@
bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
- const InlineCache& ic) {
+ Handle<mirror::ObjectArray<mirror::Class>> classes) {
DCHECK(invoke_instruction->IsInvokeVirtual() || invoke_instruction->IsInvokeInterface())
<< invoke_instruction->DebugName();
- if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, resolved_method, ic)) {
+ if (TryInlinePolymorphicCallToSameTarget(invoke_instruction, resolved_method, classes)) {
return true;
}
@@ -472,16 +523,16 @@
bool all_targets_inlined = true;
bool one_target_inlined = false;
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- if (ic.GetTypeAt(i) == nullptr) {
+ if (classes->Get(i) == nullptr) {
break;
}
ArtMethod* method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
- method = ic.GetTypeAt(i)->FindVirtualMethodForInterface(
+ method = classes->Get(i)->FindVirtualMethodForInterface(
resolved_method, pointer_size);
} else {
DCHECK(invoke_instruction->IsInvokeVirtual());
- method = ic.GetTypeAt(i)->FindVirtualMethodForVirtual(
+ method = classes->Get(i)->FindVirtualMethodForVirtual(
resolved_method, pointer_size);
}
@@ -490,20 +541,20 @@
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
dex::TypeIndex class_index = FindClassIndexIn(
- ic.GetTypeAt(i), caller_dex_file, caller_compilation_unit_.GetDexCache());
+ classes->Get(i), caller_dex_file, caller_compilation_unit_.GetDexCache());
HInstruction* return_replacement = nullptr;
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction, method, &return_replacement)) {
all_targets_inlined = false;
} else {
one_target_inlined = true;
- bool is_referrer = (ic.GetTypeAt(i) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ bool is_referrer = (classes->Get(i) == outermost_graph_->GetArtMethod()->GetDeclaringClass());
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
bool deoptimize = all_targets_inlined &&
(i != InlineCache::kIndividualCacheSize - 1) &&
- (ic.GetTypeAt(i + 1) == nullptr);
+ (classes->Get(i + 1) == nullptr);
if (outermost_graph_->IsCompilingOsr()) {
// We do not support HDeoptimize in OSR methods.
@@ -618,9 +669,10 @@
merge, original_invoke_block, /* replace_if_back_edge */ true);
}
-bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
- ArtMethod* resolved_method,
- const InlineCache& ic) {
+bool HInliner::TryInlinePolymorphicCallToSameTarget(
+ HInvoke* invoke_instruction,
+ ArtMethod* resolved_method,
+ Handle<mirror::ObjectArray<mirror::Class>> classes) {
// This optimization only works under JIT for now.
DCHECK(Runtime::Current()->UseJitCompilation());
if (graph_->GetInstructionSet() == kMips64) {
@@ -639,12 +691,12 @@
// Check whether we are actually calling the same method among
// the different types seen.
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- if (ic.GetTypeAt(i) == nullptr) {
+ if (classes->Get(i) == nullptr) {
break;
}
ArtMethod* new_method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
- new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
+ new_method = classes->Get(i)->GetImt(pointer_size)->Get(
method_index, pointer_size);
if (new_method->IsRuntimeMethod()) {
// Bail out as soon as we see a conflict trampoline in one of the target's
@@ -653,7 +705,7 @@
}
} else {
DCHECK(invoke_instruction->IsInvokeVirtual());
- new_method = ic.GetTypeAt(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
+ new_method = classes->Get(i)->GetEmbeddedVTableEntry(method_index, pointer_size);
}
DCHECK(new_method != nullptr);
if (actual_method == nullptr) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 682393e..a2b4fc9 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -28,7 +28,6 @@
class DexCompilationUnit;
class HGraph;
class HInvoke;
-class InlineCache;
class OptimizingCompilerStats;
class HInliner : public HOptimization {
@@ -105,18 +104,18 @@
// ... // inlined code
bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
- const InlineCache& ic)
+ Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
// Try to inline targets of a polymorphic call.
bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
- const InlineCache& ic)
+ Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
- const InlineCache& ic)
+ Handle<mirror::ObjectArray<mirror::Class>> classes)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 2dfdc16..1e809d5 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -466,20 +466,6 @@
klass, this));
interface_method->VisitRoots(visitor, pointer_size);
}
- // We know we don't have profiling information if the class hasn't been verified. Note
- // that this check also ensures the IsNative call can be made, as IsNative expects a fully
- // created class (and not a retired one).
- if (klass->IsVerified()) {
- // Runtime methods and native methods use the same field as the profiling info for
- // storing their own data (jni entrypoint for native methods, and ImtConflictTable for
- // some runtime methods).
- if (!IsNative<kReadBarrierOption>() && !IsRuntimeMethod()) {
- ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
- if (profiling_info != nullptr) {
- profiling_info->VisitRoots(visitor);
- }
- }
- }
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f0e619d..6a97edd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2742,12 +2742,6 @@
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
- // It's time to clear all inline caches, in case some classes can be unloaded.
- if (((gc_type == collector::kGcTypeFull) || (gc_type == collector::kGcTypePartial)) &&
- (runtime->GetJit() != nullptr)) {
- runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
- }
-
CHECK(collector != nullptr)
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3531852..1c47e7e 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -133,7 +133,7 @@
size_t max_capacity,
bool garbage_collect_code)
: lock_("Jit code cache", kJitCodeCacheLock),
- lock_cond_("Jit code cache variable", lock_),
+ lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
code_map_(code_map),
data_map_(data_map),
@@ -152,7 +152,9 @@
number_of_collections_(0),
histogram_stack_map_memory_use_("Memory used for stack maps", 16),
histogram_code_memory_use_("Memory used for compiled code", 16),
- histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
+ histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
+ is_weak_access_enabled_(true),
+ inline_cache_cond_("Jit inline cache condition variable", lock_) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
@@ -327,6 +329,34 @@
}
}
}
+ // Walk over inline caches to clear entries containing unloaded classes.
+ for (ProfilingInfo* info : profiling_infos_) {
+ for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
+ InlineCache* cache = &info->cache_[i];
+ for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
+ // This does not need a read barrier because this is called by GC.
+ mirror::Class* cls = cache->classes_[j].Read<kWithoutReadBarrier>();
+ if (cls != nullptr) {
+ // Look at the class loader of the class to know if it has been unloaded.
+ // This does not need a read barrier because this is called by GC.
+ mirror::Object* class_loader =
+ cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ if (visitor->IsMarked(class_loader) != nullptr) {
+ // The class loader is live, update the entry if the class has moved.
+ mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
+ // Note that new_cls can be null for CMS and newly allocated objects.
+ if (new_cls != nullptr && new_cls != cls) {
+ cache->classes_[j] = GcRoot<mirror::Class>(new_cls);
+ }
+ } else {
+ // The class loader is not live, clear the entry.
+ cache->classes_[j] = GcRoot<mirror::Class>(nullptr);
+ }
+ }
+ }
+ }
+ }
}
void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
@@ -375,11 +405,51 @@
}
}
-void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
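+// Whether `self` may read classes out of inline caches. With a read barrier
+// (concurrent copying) collector this follows the per-thread weak-ref access flag;
+// otherwise it follows the flag toggled by DisallowInlineCacheAccess and
+// AllowInlineCacheAccess during GC pauses.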
+bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
+ return kUseReadBarrier
+ ? self->GetWeakRefAccessEnabled()
+ : is_weak_access_enabled_.LoadSequentiallyConsistent();
+}
+
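+// Blocks until the GC has finished processing inline caches and weak access is
+// re-enabled, so the classes read afterwards are safe to use.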
+void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) {
+ if (IsWeakAccessEnabled(self)) {
+ return;
+ }
+ ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
MutexLock mu(self, lock_);
- for (ProfilingInfo* info : profiling_infos_) {
- if (!info->IsInUseByCompiler()) {
- info->ClearGcRootsInInlineCaches();
+ while (!IsWeakAccessEnabled(self)) {
+ inline_cache_cond_.Wait(self);
+ }
+}
+
+void JitCodeCache::BroadcastForInlineCacheAccess() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, lock_);
+ inline_cache_cond_.Broadcast(self);
+}
+
+void JitCodeCache::AllowInlineCacheAccess() {
+ DCHECK(!kUseReadBarrier);
+ is_weak_access_enabled_.StoreSequentiallyConsistent(true);
+ BroadcastForInlineCacheAccess();
+}
+
+void JitCodeCache::DisallowInlineCacheAccess() {
+ DCHECK(!kUseReadBarrier);
+ is_weak_access_enabled_.StoreSequentiallyConsistent(false);
+}
+
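+// Copies the non-null classes of `ic` into consecutive entries of `array`,
+// waiting first if the GC is currently processing inline caches.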
+void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
+ Handle<mirror::ObjectArray<mirror::Class>> array) {
+ WaitUntilInlineCacheAccessible(Thread::Current());
+ // Note that we don't need to hold `lock_` here: the compiler calling this
+ // method has already ensured the inline cache will not be deleted.
+ for (size_t in_cache = 0, in_array = 0;
+ in_cache < InlineCache::kIndividualCacheSize;
+ ++in_cache) {
+ mirror::Class* object = ic.classes_[in_cache].Read();
+ if (object != nullptr) {
+ array->Set(in_array++, object);
}
}
}
@@ -837,8 +907,6 @@
if (collect_profiling_info) {
ScopedThreadSuspension sts(self, kSuspended);
- gc::ScopedGCCriticalSection gcs(
- self, gc::kGcCauseJitCodeCache, gc::kCollectorTypeJitCodeCache);
MutexLock mu(self, lock_);
// Free all profiling infos of methods not compiled nor being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
@@ -852,10 +920,6 @@
// code cache collection.
if (ContainsPc(ptr) &&
info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
- // We clear the inline caches as classes in it might be stalled.
- info->ClearGcRootsInInlineCaches();
- // Do a fence to make sure the clearing is seen before attaching to the method.
- QuasiAtomic::ThreadFenceRelease();
info->GetMethod()->SetProfilingInfo(info);
} else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
// No need for this ProfilingInfo object anymore.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 40112fe..be2cec5 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -36,6 +36,7 @@
class ArtMethod;
class LinearAlloc;
+class InlineCache;
class ProfilingInfo;
namespace jit {
@@ -156,7 +157,9 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
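+ // Copy the classes recorded in `ic` into `array`, waiting for the GC if it is
+ // currently processing inline caches.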
+ void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
// will collect and retry if the first allocation is unsuccessful.
@@ -200,6 +203,12 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // The GC needs to disallow the reading of inline caches while it processes them,
+ // to avoid a class in a cache being used while it is being deleted.
+ void AllowInlineCacheAccess() REQUIRES(!lock_);
+ void DisallowInlineCacheAccess() REQUIRES(!lock_);
+ void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
@@ -275,6 +284,11 @@
void FreeData(uint8_t* data) REQUIRES(lock_);
uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
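+ // Whether the GC currently allows reading classes out of inline caches.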
+ bool IsWeakAccessEnabled(Thread* self) const;
+ void WaitUntilInlineCacheAccessible(Thread* self)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
@@ -347,6 +361,14 @@
// Histograms for keeping track of profiling info statistics.
Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
+ // Whether the GC allows accessing weaks in inline caches. Note that this
+ // is not used by the concurrent collector, which uses
+ // Thread::SetWeakRefAccessEnabled instead.
+ Atomic<bool> is_weak_access_enabled_;
+
+ // Condition to wait on for accessing inline caches.
+ ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 9ec46f0..405280d 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -36,15 +36,6 @@
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
}
- if (method->IsCopied()) {
- // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method
- // appears to happen very rarely in practice.
- holding_class_ = GcRoot<mirror::Class>(
- Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
- } else {
- holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass());
- }
- DCHECK(!holding_class_.IsNull());
}
bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
@@ -116,14 +107,6 @@
--i;
} else {
// We successfully set `cls`, just return.
- // Since the instrumentation is marked from the declaring class we need to mark the card so
- // that mod-union tables and card rescanning know about the update.
- // Note that the declaring class is not necessarily the holding class if the method is
- // copied. We need the card mark to be in the holding class since that is from where we
- // will visit the profiling info.
- if (!holding_class_.IsNull()) {
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read());
- }
return;
}
}
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 1056fac..9902bb5 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -39,46 +39,13 @@
// Once the classes_ array is full, we consider the INVOKE to be megamorphic.
class InlineCache {
public:
- bool IsMonomorphic() const {
- DCHECK_GE(kIndividualCacheSize, 2);
- return !classes_[0].IsNull() && classes_[1].IsNull();
- }
-
- bool IsMegamorphic() const {
- for (size_t i = 0; i < kIndividualCacheSize; ++i) {
- if (classes_[i].IsNull()) {
- return false;
- }
- }
- return true;
- }
-
- mirror::Class* GetMonomorphicType() const REQUIRES_SHARED(Locks::mutator_lock_) {
- // Note that we cannot ensure the inline cache is actually monomorphic
- // at this point, as other threads may have updated it.
- DCHECK(!classes_[0].IsNull());
- return classes_[0].Read();
- }
-
- bool IsUninitialized() const {
- return classes_[0].IsNull();
- }
-
- bool IsPolymorphic() const {
- DCHECK_GE(kIndividualCacheSize, 3);
- return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
- }
-
- mirror::Class* GetTypeAt(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
- return classes_[i].Read();
- }
-
static constexpr uint16_t kIndividualCacheSize = 5;
private:
uint32_t dex_pc_;
GcRoot<mirror::Class> classes_[kIndividualCacheSize];
+ friend class jit::JitCodeCache;
friend class ProfilingInfo;
DISALLOW_COPY_AND_ASSIGN(InlineCache);
@@ -102,18 +69,6 @@
REQUIRES(Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
- template<typename RootVisitorType>
- void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
- visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier());
- for (size_t i = 0; i < number_of_inline_caches_; ++i) {
- InlineCache* cache = &cache_[i];
- for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
- visitor.VisitRootIfNonNull(cache->classes_[j].AddressWithoutBarrier());
- }
- }
- }
-
ArtMethod* GetMethod() const {
return method_;
}
@@ -175,9 +130,6 @@
// Method this profiling info is for.
ArtMethod* const method_;
- // Holding class for the method in case method is a copied method.
- GcRoot<mirror::Class> holding_class_;
-
// Whether the ArtMethod is currently being compiled. This flag
// is implicitly guarded by the JIT code cache lock.
// TODO: Make the JIT code cache lock global.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index aa5da2e..5def65e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -65,8 +65,10 @@
OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
}
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ClassLoader* Class::GetClassLoader() {
- return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_));
+ return GetFieldObject<ClassLoader, kVerifyFlags, kReadBarrierOption>(
+ OFFSET_OF_OBJECT_MEMBER(Class, class_loader_));
}
template<VerifyObjectFlags kVerifyFlags>
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 792f626..248c941 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -674,6 +674,8 @@
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index ee4d669..8551791 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1782,6 +1782,9 @@
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
heap_->DisallowNewAllocationRecords();
+ if (GetJit() != nullptr) {
+ GetJit()->GetCodeCache()->DisallowInlineCacheAccess();
+ }
// All other generic system-weak holders.
for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
@@ -1795,6 +1798,9 @@
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
+ if (GetJit() != nullptr) {
+ GetJit()->GetCodeCache()->AllowInlineCacheAccess();
+ }
// All other generic system-weak holders.
for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
@@ -1810,6 +1816,9 @@
intern_table_->BroadcastForNewInterns();
java_vm_->BroadcastForNewWeakGlobals();
heap_->BroadcastForNewAllocationRecords();
+ if (GetJit() != nullptr) {
+ GetJit()->GetCodeCache()->BroadcastForInlineCacheAccess();
+ }
// All other generic system-weak holders.
for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {