Add native support for FinalizerList.makeCircularListIfUnenqueued

Called from FinalizerReference.enqueueSentinelReference to prevent
a race where the GC updates the sentinel reference's pendingNext
field before enqueueSentinelReference runs.

Bug: 17462553

(cherry picked from commit 3256166df40981f1f1997a5f00303712277c963f)

Change-Id: I7ad2fd250c2715d1aeb919bd548ef9aab24f30a2
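For context, the pattern the new locks make possible is, roughly, "test pendingNext and write it under the same lock". Below is a minimal, self-contained C++ sketch of that pattern; the names (Reference, pending_next, queue_lock, GcEnqueue) are illustrative stand-ins, not ART's API, and the real change guards the queue with the reference_queue_finalizer_references_lock_ introduced in this patch:

#include <mutex>

struct Reference {
  Reference* pending_next = nullptr;  // Null while not yet enqueued.
};

std::mutex queue_lock;  // Stand-in for reference_queue_finalizer_references_lock_.

// GC side: push an unenqueued reference onto the pending list.
void GcEnqueue(Reference* ref, Reference*& list_head) {
  std::lock_guard<std::mutex> mu(queue_lock);
  if (ref->pending_next == nullptr) {
    ref->pending_next = (list_head != nullptr) ? list_head : ref;
    list_head = ref;
  }
}

// Mutator side: if the GC has not enqueued the sentinel yet, self-link it
// into a one-element circular list. Because the test and the store happen
// under the same lock the GC holds while enqueuing, the two sides can no
// longer race on pending_next.
bool MakeCircularListIfUnenqueued(Reference* ref) {
  std::lock_guard<std::mutex> mu(queue_lock);
  if (ref->pending_next != nullptr) {
    return false;  // The GC already enqueued it; nothing to do.
  }
  ref->pending_next = ref;
  return true;
}

In the actual change, FinalizerReference.enqueueSentinelReference calls down into the native makeCircularListIfUnenqueued rather than testing pendingNext on the Java side.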
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 52a3dea..455680b 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -37,19 +37,25 @@
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -933,6 +939,30 @@
DCHECK(intern_table_lock_ == nullptr);
intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+ DCHECK(reference_processor_lock_ == nullptr);
+ reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+ DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+ reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+ DCHECK(reference_queue_weak_references_lock_ == nullptr);
+ reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+ DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+ reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+ DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+ reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+ DCHECK(reference_queue_soft_references_lock_ == nullptr);
+ reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
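The initialization order above is enforced by UPDATE_CURRENT_LOCK_LEVEL: locks must be created in strictly decreasing LockLevel order, so the mutex.cc sequence (processor, then cleared, weak, finalizer, phantom, soft) mirrors the enum additions in mutex.h below in reverse. A rough sketch of the check the macro performs (illustrative, not the exact macro body in mutex.cc):

#include <cstdio>
#include <cstdlib>

// Illustrative sketch: refuse to create a lock at a level that is not
// strictly below the previously created lock's level, then step down.
#define UPDATE_CURRENT_LOCK_LEVEL(new_level)                            \
  do {                                                                  \
    if ((new_level) >= current_lock_level) {                            \
      fprintf(stderr, "Lock level %d is not below current level %d\n",  \
              static_cast<int>(new_level),                              \
              static_cast<int>(current_lock_level));                    \
      abort();                                                          \
    }                                                                   \
    current_lock_level = (new_level);                                   \
  } while (false)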
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 354298e..20f58de 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,11 +60,16 @@
kThreadSuspendCountLock,
kAbortLock,
kJdwpSocketLock,
+ kReferenceQueueSoftReferencesLock,
+ kReferenceQueuePhantomReferencesLock,
+ kReferenceQueueFinalizerReferencesLock,
+ kReferenceQueueWeakReferencesLock,
+ kReferenceQueueClearedReferencesLock,
+ kReferenceProcessorLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
kAllocSpaceLock,
- kReferenceProcessorLock,
kDexFileMethodInlinerLock,
kDexFileToMethodInlinerMapLock,
kMarkSweepMarkStackLock,
@@ -594,8 +599,26 @@
// Guards intern table.
static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+ // Guards reference processor.
+ static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+ // Guards cleared references queue.
+ static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+ // Guards weak references queue.
+ static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+ // Guards finalizer references queue.
+ static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+ // Guards phantom references queue.
+ static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+ // Guards soft references queue.
+ static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(intern_table_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?