Visit class native roots from VisitReferences
Visit class native roots when Class::VisitReferences is called rather
than from the class linker. This makes it easier to implement class
unloading, since unmarked classes won't have their roots visited by
the class linker.
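
For context, the visitor contract this change relies on is sketched
below: every visitor handed to Class::VisitReferences now also
implements VisitRootIfNonNull/VisitRoot for compressed-reference
roots, and the class forwards its native roots through them, so a
class that is never visited (i.e. unmarked) never has its roots
touched. This is a minimal, self-contained sketch, not ART's real
code: Object, CompressedReference, Class, and MarkVisitor are
simplified stand-ins for the mirror:: and GC types, and
native_root_a/native_root_b are hypothetical root fields.

  #include <cstdio>

  struct Object {
    const char* name;
  };

  // Stand-in for mirror::CompressedReference<mirror::Object>.
  struct CompressedReference {
    Object* ptr = nullptr;
    bool IsNull() const { return ptr == nullptr; }
    Object* AsMirrorPtr() const { return ptr; }
  };

  // Stand-in for mirror::Class with two hypothetical native roots.
  struct Class {
    CompressedReference native_root_a;
    CompressedReference native_root_b;

    // Forward every native root through the supplied visitor. Only
    // classes that are actually visited have their roots walked.
    template <typename Visitor>
    void VisitNativeRoots(Visitor& visitor) {
      visitor.VisitRootIfNonNull(&native_root_a);
      visitor.VisitRootIfNonNull(&native_root_b);
    }

    template <typename Visitor>
    void VisitReferences(Visitor& visitor) {
      // ... visit instance and static fields here ...
      VisitNativeRoots(visitor);  // New: roots ride along with references.
    }
  };

  // Mirrors the shape of the visitors added in this change.
  struct MarkVisitor {
    void VisitRootIfNonNull(CompressedReference* root) {
      if (!root->IsNull()) {
        VisitRoot(root);
      }
    }
    void VisitRoot(CompressedReference* root) {
      std::printf("marking root %s\n", root->AsMirrorPtr()->name);
    }
  };

  int main() {
    Object a{"a"};
    Class klass;
    klass.native_root_a.ptr = &a;  // native_root_b stays null.
    MarkVisitor visitor;
    klass.VisitReferences(visitor);
  }

Running the sketch prints "marking root a"; the null root is skipped
by the VisitRootIfNonNull guard, matching the guards added throughout
the diff below.
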
Bug: 22181835
Change-Id: I63f31e5ebef7b2a0b764b3ba3cb038b3f561b379
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index baa33b3..ec689f8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -638,7 +638,7 @@
explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
@@ -651,6 +651,19 @@
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ visitor(root->AsMirrorPtr());
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -750,18 +763,31 @@
explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
- void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
+ void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ visitor(root->AsMirrorPtr());
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -1500,6 +1526,18 @@
collector_->DelayReferenceReferent(klass, ref);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ collector_->MarkRoot(root);
+ }
+
private:
ConcurrentCopying* const collector_;
};
@@ -1513,7 +1551,8 @@
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
+ mirror::Object* ref = obj->GetFieldObject<
+ mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
if (ref == nullptr || region_space_->IsInToSpace(ref)) {
return;
}
@@ -1530,8 +1569,8 @@
// It was updated by the mutator.
break;
}
- } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
- offset, expected_ref, new_ref));
+ } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
+ false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
// Process some roots.
@@ -1559,22 +1598,18 @@
}
}
-void ConcurrentCopying::VisitRoots(
- mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED) {
- for (size_t i = 0; i < count; ++i) {
- mirror::CompressedReference<mirror::Object>* root = roots[i];
- mirror::Object* ref = root->AsMirrorPtr();
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- continue;
- }
- mirror::Object* to_ref = Mark(ref);
- if (to_ref == ref) {
- continue;
- }
+void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
+ DCHECK(!root->IsNull());
+ mirror::Object* const ref = root->AsMirrorPtr();
+ if (region_space_->IsInToSpace(ref)) {
+ return;
+ }
+ mirror::Object* to_ref = Mark(ref);
+ if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
+  // If the CAS fails, then it was updated by the mutator.
do {
if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
// It was updated by the mutator.
@@ -1584,6 +1619,17 @@
}
}
+void ConcurrentCopying::VisitRoots(
+ mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* const root = roots[i];
+ if (!root->IsNull()) {
+ MarkRoot(root);
+ }
+ }
+}
+
// Fill the given memory block with a dummy object. Used to fill in a
// copy of an object that was lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index d324ce1..a4fd71c 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -122,6 +122,8 @@
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
+ void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index c5ad613..4b2c588 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -438,6 +438,19 @@
ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ root->Assign(collector_->GetMarkedForwardAddress(root->AsMirrorPtr()));
+ }
+
private:
MarkCompact* const collector_;
};
@@ -575,6 +588,19 @@
collector_->DelayReferenceReferent(klass, ref);
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ collector_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkCompact* const collector_;
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 92dde51..7f2c204 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -365,7 +365,7 @@
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -597,8 +597,7 @@
: mark_sweep_(mark_sweep) {}
void operator()(mirror::Object* obj) const ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -649,13 +648,33 @@
protected:
class MarkObjectParallelVisitor {
public:
- explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
- MarkSweep* mark_sweep) ALWAYS_INLINE
- : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
+ ALWAYS_INLINE explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
+ MarkSweep* mark_sweep)
+ : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
+ ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ Mark(obj->GetFieldObject<mirror::Object>(offset));
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ Mark(root->AsMirrorPtr());
+ }
+
+ private:
+ void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -668,7 +687,6 @@
}
}
- private:
MarkStackTask<kUseFinger>* const chunk_task_;
MarkSweep* const mark_sweep_;
};
@@ -1268,6 +1286,22 @@
mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
}
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ mark_sweep_->MarkObject(root->AsMirrorPtr());
+ }
+
private:
MarkSweep* const mark_sweep_;
};
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 99e00f9..606be63 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -58,12 +58,12 @@
~MarkSweep() {}
- virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+ virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
void InitializePhase();
- void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
+ void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_);
void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void FinishPhase() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FinishPhase();
virtual void MarkReachableObjects()
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e93ff05..acc1d9b 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -295,8 +295,26 @@
LOG(FATAL) << ref << " found in from space";
}
}
+
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ CHECK(!from_space_->HasAddress(root->AsMirrorPtr()));
+ }
+
private:
- space::ContinuousMemMapAllocSpace* from_space_;
+ space::ContinuousMemMapAllocSpace* const from_space_;
};
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
@@ -313,6 +331,7 @@
DCHECK(obj != nullptr);
semi_space_->VerifyNoFromSpaceReferences(obj);
}
+
private:
SemiSpace* const semi_space_;
};
@@ -670,11 +689,27 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
+ // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ collector_->MarkObject(root);
+ }
+
private:
SemiSpace* const collector_;
};