am b15cbd16: am 8addd1f1: am 65e3b69b: Merge "Move modify_ldt_lock into global lock order."
* commit 'b15cbd16c062af85bbfe12019f7e7a2f5bd2fe5a':
Move modify_ldt_lock into global lock order.
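
This change moves the x86-only modify_ldt lock and the thread id allocation
lock out of their owning objects and into the global Locks structure so that
they participate in the lock hierarchy. Global locks are now constructed in
explicit level order, from highest to lowest, with a DCHECK catching any lock
created out of order. The lock levels that may be locked/unlocked without
Thread::Current established are spelled out in mutex-inl.h, held_mutexes moves
to the end of Thread::tlsPtr_, and the oat version is bumped because the
Thread layout change shifts field offsets that compiled code depends on.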
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index 26cd864..9f36927 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -40,10 +40,9 @@
namespace art {
-static Mutex modify_ldt_lock("modify_ldt lock");
-
void Thread::InitCpu() {
- MutexLock mu(Thread::Current(), modify_ldt_lock);
+ // Take the ldt lock; Thread::Current isn't yet established.
+ MutexLock mu(nullptr, *Locks::modify_ldt_lock_);
const uintptr_t base = reinterpret_cast<uintptr_t>(this);
const size_t limit = kPageSize;
@@ -138,7 +137,7 @@
}
void Thread::CleanupCpu() {
- MutexLock mu(Thread::Current(), modify_ldt_lock);
+ MutexLock mu(this, *Locks::modify_ldt_lock_);
// Sanity check that reads from %fs point to this Thread*.
Thread* self_check;
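
For context, a minimal sketch of the scoped-locking pattern used above
(simplified; the real art::MutexLock lives in runtime/base/mutex.h): the
constructor acquires the mutex, the destructor releases it, and passing
nullptr for self is what allows use before Thread::Current is established,
subject to the CheckUnattachedThread whitelist below.

    class ScopedMutexLock {
     public:
      ScopedMutexLock(Thread* self, Mutex& mu) : self_(self), mu_(mu) {
        mu_.ExclusiveLock(self_);  // self_ may be nullptr for unattached threads
      }
      ~ScopedMutexLock() {
        mu_.ExclusiveUnlock(self_);
      }
     private:
      Thread* const self_;
      Mutex& mu_;
      ScopedMutexLock(const ScopedMutexLock&) = delete;
      ScopedMutexLock& operator=(const ScopedMutexLock&) = delete;
    };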
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index adf4c66..6c415e7 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -132,9 +132,21 @@
// TODO: tighten this check.
if (kDebugLocking) {
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
- level == kDefaultMutexLevel || level == kRuntimeShutdownLock ||
- level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
+ CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
+ // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
+ // yet established.
+ level == kRuntimeShutdownLock ||
+ // Thread ids are allocated/released before threads are established.
+ level == kAllocatedThreadIdsLock ||
+ // Thread LDTs are initialized before Thread::Current is established.
+ level == kModifyLdtLock ||
+ // Threads are unregistered while holding the thread list lock; during this process they
+ // no longer exist, so we expect an unlock with no self.
+ level == kThreadListLock ||
+ // Ignore logging which may or may not have set up thread data structures.
+ level == kLoggingLock ||
+ // Avoid recursive death.
+ level == kAbortLock);
}
}
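
The CHECK above whitelists the lock levels an unattached thread (no
Thread::Current) may lock or unlock. Written as a standalone predicate (a
sketch, not an actual ART helper), the rule reads:

    static bool LevelAllowedForUnattachedThread(LockLevel level) {
      switch (level) {
        case kRuntimeShutdownLock:     // thread creation races with shutdown
        case kAllocatedThreadIdsLock:  // ids handed out before threads attach
        case kModifyLdtLock:           // x86 LDT setup precedes Thread::Current
        case kThreadListLock:          // unregistration happens after detach
        case kLoggingLock:             // logging may run from any context
        case kAbortLock:               // avoid recursive death
          return true;
        default:
          return false;
      }
    }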
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6f7f2c1..705be40 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -30,10 +30,12 @@
namespace art {
Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
@@ -814,7 +816,13 @@
void Locks::Init() {
if (logging_lock_ != nullptr) {
// Already initialized.
+ if (kRuntimeISA == kX86) {
+ DCHECK(modify_ldt_lock_ != nullptr);
+ } else {
+ DCHECK(modify_ldt_lock_ == nullptr);
+ }
DCHECK(abort_lock_ != nullptr);
+ DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
@@ -827,32 +835,76 @@
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
} else {
- logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
- abort_lock_ = new Mutex("abort lock", kAbortLock, true);
+ // Create global locks in level order from highest lock level to lowest.
+ LockLevel current_lock_level = kMutatorLock;
+ DCHECK(mutator_lock_ == nullptr);
+ mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
+ #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
+ DCHECK_LT(new_level, current_lock_level); \
+ current_lock_level = new_level;
+
+ UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
+ DCHECK(heap_bitmap_lock_ == nullptr);
+ heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
+ DCHECK(runtime_shutdown_lock_ == nullptr);
+ runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
+ DCHECK(profiler_lock_ == nullptr);
+ profiler_lock_ = new Mutex("profiler lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+ DCHECK(trace_lock_ == nullptr);
+ trace_lock_ = new Mutex("trace lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
+ DCHECK(thread_list_lock_ == nullptr);
+ thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
DCHECK(breakpoint_lock_ == nullptr);
- breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
+ breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
- kClassLinkerClassesLock);
- DCHECK(heap_bitmap_lock_ == nullptr);
- heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
- DCHECK(mutator_lock_ == nullptr);
- mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock);
- DCHECK(runtime_shutdown_lock_ == nullptr);
- runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock);
- DCHECK(thread_list_lock_ == nullptr);
- thread_list_lock_ = new Mutex("thread list lock", kThreadListLock);
- DCHECK(thread_suspend_count_lock_ == nullptr);
- thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
- DCHECK(trace_lock_ == nullptr);
- trace_lock_ = new Mutex("trace lock", kTraceLock);
- DCHECK(profiler_lock_ == nullptr);
- profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
- DCHECK(unexpected_signal_lock_ == nullptr);
- unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
+ current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
+ DCHECK(allocated_thread_ids_lock_ == nullptr);
+ allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
+
+ if (kRuntimeISA == kX86) {
+ UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
+ DCHECK(modify_ldt_lock_ == nullptr);
+ modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
+ }
+
+ UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
DCHECK(intern_table_lock_ == nullptr);
- intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock);
+ intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
+ DCHECK(abort_lock_ == nullptr);
+ abort_lock_ = new Mutex("abort lock", current_lock_level, true);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
+ DCHECK(thread_suspend_count_lock_ == nullptr);
+ thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
+ DCHECK(unexpected_signal_lock_ == nullptr);
+ unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
+ DCHECK(logging_lock_ == nullptr);
+ logging_lock_ = new Mutex("logging lock", current_lock_level, true);
+
+ #undef UPDATE_CURRENT_LOCK_LEVEL
}
}
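
The UPDATE_CURRENT_LOCK_LEVEL idiom makes the intended order checkable at
startup: each global lock must be created at a strictly lower level than its
predecessor, so a misordered LockLevel enum fails a DCHECK immediately rather
than surfacing later as a deadlock. A reduced sketch of the same pattern with
hypothetical levels (assume a Mutex constructor taking the level):

    // Hypothetical three-level hierarchy; kOuter must be taken first.
    enum SketchLevel { kInner = 0, kMiddle = 1, kOuter = 2 };

    SketchLevel current = kOuter;
    Mutex* outer = new Mutex("outer", current);
    #define UPDATE_LEVEL(new_level) \
      DCHECK_LT(new_level, current); \
      current = new_level;
    UPDATE_LEVEL(kMiddle);
    Mutex* middle = new Mutex("middle", current);
    UPDATE_LEVEL(kInner);  // swapping this with kMiddle would DCHECK here
    Mutex* inner = new Mutex("inner", current);
    #undef UPDATE_LEVEL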
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e13c8d5..522692e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -74,6 +74,8 @@
kPinTableLock,
kLoadLibraryLock,
kJdwpObjectRegistryLock,
+ kModifyLdtLock,
+ kAllocatedThreadIdsLock,
kClassLinkerClassesLock,
kBreakpointLock,
kMonitorLock,
@@ -532,28 +534,34 @@
// Guards shutdown of the runtime.
static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+ // Guards background profiler global state.
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+ // Guards trace (i.e., traceview) requests.
+ static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
- static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
// Guards breakpoints.
static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
- // Guards trace requests.
- static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
- // Guards profile objects.
- static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
-
// Guards lists of classes within the class linker.
- static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);
+ static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
#define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+ // Guards the allocation/deallocation of thread ids.
+ static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+ // Guards modification of the LDT on x86.
+ static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+
// Guards intern table.
- static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+ static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
// Have an exclusive aborting thread.
static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
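
The ACQUIRED_AFTER chain mirrors the construction order in Locks::Init(),
letting Clang's -Wthread-safety analysis (annotalysis) check acquisition order
statically. A sketch of a conforming caller (DoWork is hypothetical;
modify_ldt_lock_ only exists on x86):

    void DoWork(Thread* self) {
      MutexLock ids_mu(self, *Locks::allocated_thread_ids_lock_);
      MutexLock ldt_mu(self, *Locks::modify_ldt_lock_);  // declared ACQUIRED_AFTER
      // Taking the two locks in the opposite order would trip both
      // annotalysis and the runtime kDebugLocking checks.
    }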
diff --git a/runtime/oat.cc b/runtime/oat.cc
index cb9334a..10d335e 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '2', '8', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '2', '9', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
diff --git a/runtime/thread.h b/runtime/thread.h
index 62fa323..de054ee 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1045,9 +1045,6 @@
// A cached pthread_t for the pthread underlying this Thread*.
pthread_t pthread_self;
- // Support for Mutex lock hierarchy bug detection.
- BaseMutex* held_mutexes[kLockLevelCount];
-
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
@@ -1074,6 +1071,9 @@
// Thread-local allocation stack data/routines.
mirror::Object** thread_local_alloc_stack_top;
mirror::Object** thread_local_alloc_stack_end;
+
+ // Support for Mutex lock hierarchy bug detection.
+ BaseMutex* held_mutexes[kLockLevelCount];
} tlsPtr_;
// Guards the 'interrupted_' and 'wait_monitor_' members.
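
held_mutexes is the per-thread table behind kDebugLocking: one slot per
LockLevel, recording which lock, if any, is held at that level. A sketch of
the acquisition-time check it enables (simplified from
BaseMutex::RegisterAsLocked; signature flattened, diagnostics trimmed):

    void RegisterAsLocked(Thread* self, BaseMutex* mu, LockLevel level) {
      if (kDebugLocking) {
        // Holding any lock at this level or below violates the hierarchy:
        // acquisition must proceed from higher levels to lower ones.
        for (int i = level; i >= 0; --i) {
          CHECK(self->GetHeldMutex(static_cast<LockLevel>(i)) == nullptr)
              << "lock level violation while locking at level " << level;
        }
      }
      self->SetHeldMutex(level, mu);  // recorded in tlsPtr_.held_mutexes
    }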
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 8046500..388c9b4 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -40,8 +40,7 @@
namespace art {
ThreadList::ThreadList()
- : allocated_ids_lock_("allocated thread ids lock"),
- suspend_all_count_(0), debug_suspend_all_count_(0),
+ : suspend_all_count_(0), debug_suspend_all_count_(0),
thread_exit_cond_("thread exit condition variable", *Locks::thread_list_lock_) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1)));
}
@@ -849,7 +848,7 @@
}
uint32_t ThreadList::AllocThreadId(Thread* self) {
- MutexLock mu(self, allocated_ids_lock_);
+ MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
for (size_t i = 0; i < allocated_ids_.size(); ++i) {
if (!allocated_ids_[i]) {
allocated_ids_.set(i);
@@ -861,7 +860,7 @@
}
void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
- MutexLock mu(self, allocated_ids_lock_);
+ MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
--id; // Zero is reserved to mean "invalid".
DCHECK(allocated_ids_[id]) << id;
allocated_ids_.reset(id);
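
For reference, the allocation scheme above in self-contained form
(BitIdAllocator is hypothetical; the real bitset lives in ThreadList, guarded
by Locks::allocated_thread_ids_lock_). Ids are one-based so that zero stays
reserved to mean "invalid":

    #include <bitset>
    #include <cstdint>

    template <size_t kMaxIds>
    class BitIdAllocator {
     public:
      // Returns a 1-based id, or 0 when the pool is exhausted (the real
      // code LOG(FATAL)s rather than returning).
      uint32_t Alloc() {
        for (size_t i = 0; i < kMaxIds; ++i) {
          if (!bits_[i]) {
            bits_.set(i);
            return static_cast<uint32_t>(i) + 1;
          }
        }
        return 0;
      }
      void Release(uint32_t id) {
        --id;  // zero is reserved to mean "invalid"
        bits_.reset(id);
      }
     private:
      std::bitset<kMaxIds> bits_;
    };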
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index a574340..d46987a 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -132,7 +132,7 @@
private:
uint32_t AllocThreadId(Thread* self);
- void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);
+ void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_);
bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
@@ -151,8 +151,7 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
- mutable Mutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(allocated_ids_lock_);
+ std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);
// The actual list of all threads.
std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);
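
With the mutex now global, GUARDED_BY and LOCKS_EXCLUDED name
Locks::allocated_thread_ids_lock_ directly, so annotalysis rejects any access
to allocated_ids_ made without the lock held. A sketch of a conforming
accessor (hypothetical member function):

    uint32_t ThreadList::CountAllocatedIds(Thread* self) {
      MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
      // Reading allocated_ids_ is legal only while the lock is held.
      return static_cast<uint32_t>(allocated_ids_.count());
    }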