Move reference processor and mirror::Reference to ObjPtr
Bug: 31113334
Test: test-art-host
Change-Id: I2c7c3dfd88ebf12a0de271436f8a7781f997e061
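Context: ObjPtr<T> (runtime/obj_ptr.h) is a value-type handle around mirror pointers; in debug builds it also verifies the handle has not been held across a thread suspend point. A minimal sketch of the surface this change relies on — hypothetical and simplified, not the real class:

    // Hypothetical, simplified analogue of ART's ObjPtr<MirrorType>.
    #include <cstddef>

    template <typename MirrorType>
    class ObjPtrSketch {
     public:
      ObjPtrSketch(MirrorType* ptr) : ptr_(ptr) {}  // implicit, keeps call sites terse
      MirrorType* Ptr() const { return ptr_; }      // escape hatch to a raw pointer
      MirrorType* operator->() const { return ptr_; }
      bool operator==(std::nullptr_t) const { return ptr_ == nullptr; }
      bool operator!=(std::nullptr_t) const { return ptr_ != nullptr; }
     private:
      MirrorType* ptr_;  // the real class additionally encodes a thread-local validity cookie
    };

This is why the hunks below can pass ObjPtr by value, compare it against nullptr, call through ->, and drop back to .Ptr() where a raw mirror pointer is still required (list_, IsInToSpace, the test's std::set).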
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 9694597..4b8f38d 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,12 +60,13 @@
condition_.Broadcast(self);
}
-mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
+ ObjPtr<mirror::Reference> reference) {
if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
// Under the read barrier / concurrent copying collector, it's not safe to call GetReferent() when
// weak ref access is disabled, as the call includes a read barrier which may push a ref onto the
// mark stack and interfere with termination of marking.
- mirror::Object* const referent = reference->GetReferent();
+ ObjPtr<mirror::Object> const referent = reference->GetReferent();
// If the referent is null then it is already cleared, so we can just return null since there is
// no scenario where it becomes non-null during the reference processing phase.
if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
@@ -116,7 +117,8 @@
}
// Process reference class instances and schedule finalizations.
-void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+void ReferenceProcessor::ProcessReferences(bool concurrent,
+ TimingLogger* timings,
bool clear_soft_references,
collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
@@ -188,7 +190,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
@@ -260,7 +263,8 @@
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
// Wait until we are done processing references.
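
The condition_.Broadcast(self) context line at the top of this file pairs with a slow path (elided from these hunks) in which GetReferent blocks until weak reference access is re-enabled. A standalone std:: analogue of that broadcast/wait protocol — an illustrative sketch, not ART's Mutex/ConditionVariable API:

    #include <condition_variable>
    #include <mutex>

    std::mutex reference_processor_lock;
    std::condition_variable condition;
    bool weak_ref_access_enabled = false;

    // Mutator side: block until the collector re-enables weak reference access.
    void WaitForWeakRefAccess() {
      std::unique_lock<std::mutex> lk(reference_processor_lock);
      condition.wait(lk, [] { return weak_ref_access_enabled; });
    }

    // Collector side: flip the flag, then wake all waiters (cf. condition_.Broadcast(self)).
    void EnableWeakRefAccessAndBroadcast() {
      {
        std::lock_guard<std::mutex> lk(reference_processor_lock);
        weak_ref_access_enabled = true;
      }
      condition.notify_all();
    }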
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 4788f8a..759b7e1 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -46,7 +46,9 @@
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
- void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+ void ProcessReferences(bool concurrent,
+ TimingLogger* timings,
+ bool clear_soft_references,
gc::collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_)
@@ -57,16 +59,17 @@
void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent; may block if references are being processed.
- mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+ ObjPtr<mirror::Object> GetReferent(Thread* self, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with the reference if it is not enqueued. Uses the finalizer queue lock.
- bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+ bool MakeCircularListIfUnenqueued(ObjPtr<mirror::FinalizerReference> reference)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
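
The REQUIRES/REQUIRES_SHARED annotations threaded through the signatures above are Clang Thread Safety Analysis attributes (the negated form, REQUIRES(!lock), asserts the caller must not hold the lock). A minimal sketch of the underlying idiom, using the attribute spellings from the Clang documentation rather than ART's own macro definitions; compile with -Wthread-safety:

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE() __attribute__((acquire_capability()))
    #define RELEASE() __attribute__((release_capability()))

    class CAPABILITY("mutex") Mu {
     public:
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    Mu mu;
    int counter GUARDED_BY(mu);

    void Increment() REQUIRES(mu) {  // callers must hold mu; checked at compile time
      ++counter;
    }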
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 62625c4..4e6f7da 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -29,7 +29,7 @@
ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (ref->IsUnprocessed()) {
@@ -37,16 +37,16 @@
}
}
-void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
CHECK(ref->IsUnprocessed());
if (IsEmpty()) {
// 1-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
- list_ = ref;
+ list_ = ref.Ptr();
} else {
// The list is owned by the GC; everything that has been inserted must already be at least
// gray.
- mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
@@ -54,16 +54,16 @@
list_->SetPendingNext(ref);
}
-mirror::Reference* ReferenceQueue::DequeuePendingReference() {
+ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences, which
// is single-threaded.
if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
list_->SetPendingNext(next);
}
ref->SetPendingNext(nullptr);
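
The pending list is a circular singly linked list: list_ designates one node, list_->pendingNext is the node DequeuePendingReference removes, and EnqueueReference splices the new node in right after list_. The same shape as a standalone sketch, with a hypothetical Node standing in for mirror::Reference:

    struct Node { Node* next = nullptr; };  // next plays the pendingNext role

    struct CircularQueue {
      Node* tail_ = nullptr;  // mirrors list_

      void Enqueue(Node* n) {
        if (tail_ == nullptr) {
          tail_ = n;               // 1-element cycle: n->next will point at n itself
        } else {
          n->next = tail_->next;   // splice in front of the current head
        }
        tail_->next = n;
      }

      Node* Dequeue() {
        Node* n = tail_->next;     // head of the cycle
        if (tail_ == n) {
          tail_ = nullptr;         // removed the last element
        } else {
          tail_->next = n->next;   // unlink the head
        }
        n->next = nullptr;         // cf. ref->SetPendingNext(nullptr)
        return n;
      }
    };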
@@ -83,10 +83,10 @@
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
// find it here, which is OK.
CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
+ ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
- CHECK(concurrent_copying->IsInToSpace(referent))
+ CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
<< "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
<< " referent=" << referent;
}
@@ -96,13 +96,13 @@
}
void ReferenceQueue::Dump(std::ostream& os) const {
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
os << "Reference starting at list_=" << list_ << "\n";
if (cur == nullptr) {
return;
}
do {
- mirror::Reference* pending_next = cur->GetPendingNext();
+ ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
os << "Reference= " << cur << " PendingNext=" << pending_next;
if (cur->IsFinalizerReferenceInstance()) {
os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
@@ -114,7 +114,7 @@
size_t ReferenceQueue::GetLength() const {
size_t count = 0;
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
if (cur != nullptr) {
do {
++count;
@@ -127,7 +127,7 @@
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::Reference* ref = DequeuePendingReference();
+ ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
@@ -145,11 +145,11 @@
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+ ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
- mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
+ ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
ref->SetZombie<true>(forward_address);
@@ -167,8 +167,8 @@
if (UNLIKELY(IsEmpty())) {
return;
}
- mirror::Reference* const head = list_;
- mirror::Reference* ref = head;
+ ObjPtr<mirror::Reference> const head = list_;
+ ObjPtr<mirror::Reference> ref = head;
do {
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr) {
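
EnqueueFinalizerReferences implements the step reference_queue.h describes as "white referents are blackened, moved to the zombie field, and the referent field is cleared" (the clear-and-enqueue tail of the loop is elided from the hunk above). A toy model of just that decision, with hypothetical stand-ins for the collector machinery and no moving GC, so Mark returns its argument unchanged:

    struct ToyObject { bool marked = false; };

    struct ToyFinalizerRef {
      ToyObject* referent = nullptr;
      ToyObject* zombie = nullptr;  // plays FinalizerReference's zombie field
    };

    ToyObject* Mark(ToyObject* obj) {  // cf. collector->MarkObject(); no forwarding here
      obj->marked = true;
      return obj;
    }

    // Returns true if the reference should go on the cleared_references queue.
    bool ProcessFinalizerReference(ToyFinalizerRef* ref) {
      if (ref->referent != nullptr && !ref->referent->marked) {
        ref->zombie = Mark(ref->referent);  // blacken, move to zombie...
        ref->referent = nullptr;            // ...and clear the referent
        return true;
      }
      return false;
    }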
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 1de1aa1..b5ec1e5 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -26,6 +26,7 @@
#include "base/timing_logger.h"
#include "globals.h"
#include "jni.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"
@@ -54,15 +55,15 @@
// Enqueue a reference if it is unprocessed. Thread safe to call from multiple
// threads since it uses a lock to avoid a race between checking for the reference's presence and
// adding it.
- void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
+ void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference. The reference must be unprocessed.
// Not thread safe; used when mutators are paused to minimize lock overhead.
- void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
// Dequeue a reference from the queue and return it.
- mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
@@ -104,7 +105,7 @@
// calling AtomicEnqueueIfNotEnqueued.
Mutex* const lock_;
// The actual reference list. Only a root for the mark compact GC since it will be null for other
- // GC types.
+ // GC types. Not an ObjPtr since it is accessed from multiple threads.
mirror::Reference* list_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
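
On the new comment's rationale: an ObjPtr is, roughly, a handle that is only guaranteed valid for the thread and scope that created it, so a field read by both mutators and GC threads has to stay a raw mirror::Reference*. A toy illustration of a handle with that restriction — hypothetical, enforced here by owner id rather than by ObjPtr's actual cookie scheme:

    #include <cassert>
    #include <thread>

    template <typename T>
    class ThreadLocalHandle {
     public:
      explicit ThreadLocalHandle(T* ptr) : ptr_(ptr), owner_(std::this_thread::get_id()) {}
      T* Ptr() const {
        // Trips if a handle created on one thread is decoded on another.
        assert(owner_ == std::this_thread::get_id());
        return ptr_;
      }
     private:
      T* ptr_;
      std::thread::id owner_;
    };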
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 5b8a3c2..3ca3353 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -52,10 +52,10 @@
std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
std::set<mirror::Reference*> dequeued;
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_EQ(queue.GetLength(), 0U);
ASSERT_TRUE(queue.IsEmpty());
ASSERT_EQ(refs, dequeued);
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 46b9e80..4a3654b 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1186,13 +1186,13 @@
// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
void UnstartedRuntime::UnstartedReferenceGetReferent(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
- mirror::Reference* const ref = down_cast<mirror::Reference*>(
+ ObjPtr<mirror::Reference> const ref = down_cast<mirror::Reference*>(
shadow_frame->GetVRegReference(arg_offset));
if (ref == nullptr) {
AbortTransactionOrFail(self, "Reference.getReferent() with null object");
return;
}
- mirror::Object* const referent =
+ ObjPtr<mirror::Object> const referent =
Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
result->SetL(referent);
}
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 039989b..a449b41 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -19,6 +19,8 @@
#include "reference.h"
+#include "obj_ptr-inl.h"
+
namespace art {
namespace mirror {
@@ -27,6 +29,24 @@
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
+template<bool kTransactionActive>
+inline void Reference::SetReferent(ObjPtr<Object> referent) {
+ SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
+}
+
+inline void Reference::SetPendingNext(ObjPtr<Reference> pending_next) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFieldObject<true>(PendingNextOffset(), pending_next);
+ } else {
+ SetFieldObject<false>(PendingNextOffset(), pending_next);
+ }
+}
+
+template<bool kTransactionActive>
+inline void FinalizerReference::SetZombie(ObjPtr<Object> zombie) {
+ return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
+}
+
} // namespace mirror
} // namespace art
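
SetPendingNext shows the pattern used throughout these setters: branch once on the runtime transaction flag, then jump into a template instantiation so the flag is a compile-time constant inside the store path. Reduced to a standalone sketch with hypothetical names:

    template <bool kTransactionActive>
    void SetFieldSketch(int new_value) {
      if (kTransactionActive) {
        // A transactional build would record the old value here for rollback;
        // this branch is resolved at compile time, so the non-transactional
        // instantiation carries no cost.
      }
      (void)new_value;  // the real code performs the field store here
    }

    bool IsActiveTransactionStub() { return false; }  // stand-in for Runtime::Current()->IsActiveTransaction()

    void SetField(int new_value) {
      if (IsActiveTransactionStub()) {
        SetFieldSketch<true>(new_value);
      } else {
        SetFieldSketch<false>(new_value);
      }
    }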
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 3c7f8c8..1d0b4c5 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "reference.h"
+#include "reference-inl.h"
#include "art_method.h"
#include "gc_root-inl.h"
@@ -24,7 +24,7 @@
GcRoot<Class> Reference::java_lang_ref_Reference_;
-void Reference::SetClass(Class* java_lang_ref_Reference) {
+void Reference::SetClass(ObjPtr<Class> java_lang_ref_Reference) {
CHECK(java_lang_ref_Reference_.IsNull());
CHECK(java_lang_ref_Reference != nullptr);
java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 6a8b32b..f2fa589 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "class.h"
#include "gc_root.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
@@ -69,9 +70,7 @@
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
- }
+ void SetReferent(ObjPtr<Object> referent) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
@@ -82,14 +81,7 @@
return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
- void SetPendingNext(Reference* pending_next)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(PendingNextOffset(), pending_next);
- } else {
- SetFieldObject<false>(PendingNextOffset(), pending_next);
- }
- }
+ void SetPendingNext(ObjPtr<Reference> pending_next) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the reference's pendingNext is null, indicating it is
// okay to process this reference.
@@ -112,7 +104,7 @@
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
- static void SetClass(Class* klass);
+ static void SetClass(ObjPtr<Class> klass);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -144,9 +136,8 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
- return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
- }
+ void SetZombie(ObjPtr<Object> zombie) REQUIRES_SHARED(Locks::mutator_lock_);
+
Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 95f6d51..bedca10 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -28,8 +28,8 @@
static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
- mirror::Object* const referent =
- Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref.Ptr());
+ ObjPtr<mirror::Object> const referent =
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
return soa.AddLocalReference<jobject>(referent);
}