Add ScopedThreadSuspension
Replaces the error-prone manual pairing of TransitionFromRunnableToSuspended
and TransitionFromSuspendedToRunnable with a RAII scope that guarantees the
thread returns to the runnable state on every exit path.
Change-Id: Ie6ae9c0357c83b4fc4899d05dfa0975553170267
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ded70cd..9ee0a36 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -688,7 +688,7 @@
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
// Go to native so that we don't block GC during compilation.
- self->TransitionFromRunnableToSuspended(kNative);
+ ScopedThreadSuspension sts(self, kNative);
std::vector<const DexFile*> dex_files;
dex_files.push_back(dex_file);
@@ -718,7 +718,6 @@
dex_cache);
self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
- self->TransitionFromSuspendedToRunnable();
}
CompiledMethod* CompilerDriver::CompileArtMethod(Thread* self, ArtMethod* method) {
@@ -737,7 +736,7 @@
GetDexToDexCompilationLevel(self, *this, class_loader, *dex_file, class_def);
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
// Go to native so that we don't block GC during compilation.
- self->TransitionFromRunnableToSuspended(kNative);
+ ScopedThreadSuspension sts(self, kNative);
CompileMethod(self,
this,
code_item,
@@ -751,7 +750,6 @@
true,
dex_cache);
auto* compiled_method = GetCompiledMethod(MethodReference(dex_file, method_idx));
- self->TransitionFromSuspendedToRunnable();
return compiled_method;
}
@@ -2382,7 +2380,7 @@
}
// Go to native so that we don't block GC during compilation.
- soa.Self()->TransitionFromRunnableToSuspended(kNative);
+ ScopedThreadSuspension sts(soa.Self(), kNative);
CompilerDriver* const driver = manager_->GetCompiler();
@@ -2437,8 +2435,6 @@
it.Next();
}
DCHECK(!it.HasNext());
-
- soa.Self()->TransitionFromSuspendedToRunnable();
}
private:
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index dbd3366..f318921 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -85,11 +85,9 @@
bool ImageWriter::PrepareImageAddressSpace() {
target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
{
- Thread::Current()->TransitionFromSuspendedToRunnable();
+ ScopedObjectAccess soa(Thread::Current());
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
-
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
}
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->CollectGarbage(false); // Remove garbage.
@@ -109,9 +107,10 @@
CheckNonImageClassesRemoved();
}
- Thread::Current()->TransitionFromSuspendedToRunnable();
- CalculateNewObjectOffsets();
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ CalculateNewObjectOffsets();
+ }
// This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
// bin size sums being calculated.
@@ -164,14 +163,14 @@
size_t oat_data_offset = 0;
ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
- Thread::Current()->TransitionFromSuspendedToRunnable();
-
- CreateHeader(oat_loaded_size, oat_data_offset);
- CopyAndFixupNativeData();
- // TODO: heap validation can't handle these fix up passes.
- Runtime::Current()->GetHeap()->DisableObjectValidation();
- CopyAndFixupObjects();
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ CreateHeader(oat_loaded_size, oat_data_offset);
+ CopyAndFixupNativeData();
+ // TODO: heap validation can't handle these fix up passes.
+ Runtime::Current()->GetHeap()->DisableObjectValidation();
+ CopyAndFixupObjects();
+ }
SetOatChecksumFromElfFile(oat_file.get());
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c553a18..b59edc9 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1604,14 +1604,11 @@
// Since FlushAllocStack() above resets the (active) allocation
// stack. Need to revoke the thread-local allocation stacks that
// point into it.
- {
- self->TransitionFromRunnableToSuspended(kNative);
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
- thread_list->SuspendAll(__FUNCTION__);
- heap->RevokeAllThreadLocalAllocationStacks(self);
- thread_list->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
- }
+ ScopedThreadSuspension sts(self, kNative);
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ thread_list->SuspendAll(__FUNCTION__);
+ heap->RevokeAllThreadLocalAllocationStacks(self);
+ thread_list->ResumeAll();
}
{
// Mark dex caches.
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 40e2cd3..4a45f49 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -32,20 +32,17 @@
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
- Runtime* r = Runtime::Current();
+ Runtime* const runtime = Runtime::Current();
+ Thread* const self = Thread::Current();
+ ScopedObjectAccess soa(self); // So we can create callee-save methods.
- Thread* t = Thread::Current();
- t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
-
- r->SetInstructionSet(isa);
- ArtMethod* save_method = r->CreateCalleeSaveMethod();
- r->SetCalleeSaveMethod(save_method, type);
+ runtime->SetInstructionSet(isa);
+ ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
+ runtime->SetCalleeSaveMethod(save_method, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec;
-
- t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
}
};
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 8d34f5a..ffca01d 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -736,14 +736,14 @@
// Ensure all threads are suspended while we read objects' lock words.
Thread* self = Thread::Current();
CHECK_EQ(self->GetState(), kRunnable);
- self->TransitionFromRunnableToSuspended(kSuspended);
- Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
- MonitorInfo monitor_info(o);
-
- Runtime::Current()->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
-
+ MonitorInfo monitor_info;
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
+ monitor_info = MonitorInfo(o);
+ Runtime::Current()->GetThreadList()->ResumeAll();
+ }
if (monitor_info.owner_ != nullptr) {
expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
} else {
@@ -3148,7 +3148,7 @@
}
}
CHECK_EQ(self->GetState(), kRunnable);
- self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
+ ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
// We need to suspend mutator threads first.
Runtime* const runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll(__FUNCTION__);
@@ -3164,7 +3164,6 @@
}
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
@@ -3493,9 +3492,9 @@
// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
// cause suspension if the thread is the current thread.
-class ScopedThreadSuspension {
+class ScopedDebuggerThreadSuspension {
public:
- ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
+ ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_)
SHARED_REQUIRES(Locks::mutator_lock_) :
thread_(nullptr),
@@ -3508,13 +3507,14 @@
if (thread_ == soa.Self()) {
self_suspend_ = true;
} else {
- soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
- jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
- bool timed_out;
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
- Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
- &timed_out);
- CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
+ Thread* suspended_thread;
+ {
+ ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
+ jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
+ bool timed_out;
+ ThreadList* const thread_list = Runtime::Current()->GetThreadList();
+ suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
+ }
if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
error_ = JDWP::ERR_INVALID_THREAD;
@@ -3534,7 +3534,7 @@
return error_;
}
- ~ScopedThreadSuspension() {
+ ~ScopedDebuggerThreadSuspension() {
if (other_suspend_) {
Runtime::Current()->GetThreadList()->Resume(thread_, true);
}
@@ -3550,7 +3550,7 @@
JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
JDWP::JdwpStepDepth step_depth) {
Thread* self = Thread::Current();
- ScopedThreadSuspension sts(self, thread_id);
+ ScopedDebuggerThreadSuspension sts(self, thread_id);
if (sts.GetError() != JDWP::ERR_NONE) {
return sts.GetError();
}
@@ -3986,10 +3986,9 @@
// Suspend other threads if the invoke is not single-threaded.
if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+ ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension);
VLOG(jdwp) << " Suspending all threads";
Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
- soa.Self()->TransitionFromSuspendedToRunnable();
}
VLOG(jdwp) << " --> returned " << result_tag
@@ -4655,7 +4654,7 @@
context.SetChunkOverhead(0);
// Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
// RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
ThreadList* tl = Runtime::Current()->GetThreadList();
tl->SuspendAll(__FUNCTION__);
{
@@ -4663,7 +4662,6 @@
space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
}
tl->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
} else if (space->IsBumpPointerSpace()) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
@@ -4671,15 +4669,16 @@
HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
} else if (space->IsRegionSpace()) {
heap->IncrementDisableMovingGC(self);
- self->TransitionFromRunnableToSuspended(kSuspended);
- ThreadList* tl = Runtime::Current()->GetThreadList();
- tl->SuspendAll(__FUNCTION__);
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- context.SetChunkOverhead(0);
- space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
- HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
- tl->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ ThreadList* tl = Runtime::Current()->GetThreadList();
+ tl->SuspendAll(__FUNCTION__);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ context.SetChunkOverhead(0);
+ space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
+ HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
+ tl->ResumeAll();
+ }
heap->DecrementDisableMovingGC(self);
} else {
UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 5cdf967..0b36694 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -36,14 +36,13 @@
Runtime* r = Runtime::Current();
Thread* t = Thread::Current();
- t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
+
+ ScopedObjectAccess soa(t);
r->SetInstructionSet(isa);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
- t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
-
return save_method;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 961b80f..9292c7a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -887,13 +887,14 @@
// easily broken. Visit objects while GC isn't running by using
// IncrementDisableMovingGC() and threads are suspended.
IncrementDisableMovingGC(self);
- self->TransitionFromRunnableToSuspended(kWaitingForVisitObjects);
- ThreadList* tl = Runtime::Current()->GetThreadList();
- tl->SuspendAll(__FUNCTION__);
- VisitObjectsInternalRegionSpace(callback, arg);
- VisitObjectsInternal(callback, arg);
- tl->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
+ {
+ ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
+ ThreadList* tl = Runtime::Current()->GetThreadList();
+ tl->SuspendAll(__FUNCTION__);
+ VisitObjectsInternalRegionSpace(callback, arg);
+ VisitObjectsInternal(callback, arg);
+ tl->ResumeAll();
+ }
DecrementDisableMovingGC(self);
} else {
// GCs can move objects, so don't allow this.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index e1c5b64..77f606d 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -298,18 +298,16 @@
}
}
-void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
- UNUSED(failed_alloc_bytes);
- Thread* self = Thread::Current();
+void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t failed_alloc_bytes ATTRIBUTE_UNUSED) {
+ Thread* const self = Thread::Current();
size_t max_contiguous_allocation = 0;
// To allow the Walk/InspectAll() to exclusively-lock the mutator
// lock, temporarily release the shared access to the mutator
// lock here by transitioning to the suspended state.
Locks::mutator_lock_->AssertSharedHeld(self);
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
Walk(MSpaceChunkCallback, &max_contiguous_allocation);
- self->TransitionFromSuspendedToRunnable();
- Locks::mutator_lock_->AssertSharedHeld(self);
os << "; failed due to fragmentation (largest possible contiguous allocation "
<< max_contiguous_allocation << " bytes)";
}
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 1a193c3..d8072ea 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -331,10 +331,8 @@
// The mutators are not suspended yet and we have a shared access
// to the mutator lock. Temporarily release the shared access by
// transitioning to the suspend state, and suspend the mutators.
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
- self->TransitionFromSuspendedToRunnable();
- Locks::mutator_lock_->AssertSharedHeld(self);
} else {
// The mutators are not suspended yet. Suspend the mutators.
InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index b49f7e1..56fe9ef 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -139,12 +139,11 @@
ScopedObjectAccess soa(Thread::Current());
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
{
- soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(soa.Self(), kSuspended);
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll("Instrumentation::ConfigureStubs");
instr->ConfigureStubs(key, level);
runtime->GetThreadList()->ResumeAll();
- soa.Self()->TransitionFromSuspendedToRunnable();
}
}
@@ -162,12 +161,11 @@
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
TestInstrumentationListener listener;
{
- soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(soa.Self(), kSuspended);
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll("Add instrumentation listener");
instr->AddListener(&listener, instrumentation_event);
runtime->GetThreadList()->ResumeAll();
- soa.Self()->TransitionFromSuspendedToRunnable();
}
ArtMethod* const event_method = nullptr;
@@ -182,12 +180,11 @@
listener.Reset();
{
- soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(soa.Self(), kSuspended);
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll("Remove instrumentation listener");
instr->RemoveListener(&listener, instrumentation_event);
runtime->GetThreadList()->ResumeAll();
- soa.Self()->TransitionFromSuspendedToRunnable();
}
// Check the listener is not registered and is not notified of the event.
@@ -201,14 +198,13 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
runtime->GetThreadList()->SuspendAll("Single method deoptimization");
if (enable_deoptimization) {
instrumentation->EnableDeoptimization();
}
instrumentation->Deoptimize(method);
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
void UndeoptimizeMethod(Thread* self, ArtMethod* method,
@@ -216,64 +212,59 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
runtime->GetThreadList()->SuspendAll("Single method undeoptimization");
instrumentation->Undeoptimize(method);
if (disable_deoptimization) {
instrumentation->DisableDeoptimization(key);
}
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
runtime->GetThreadList()->SuspendAll("Full deoptimization");
if (enable_deoptimization) {
instrumentation->EnableDeoptimization();
}
instrumentation->DeoptimizeEverything(key);
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
runtime->GetThreadList()->SuspendAll("Full undeoptimization");
instrumentation->UndeoptimizeEverything(key);
if (disable_deoptimization) {
instrumentation->DisableDeoptimization(key);
}
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
instrumentation->EnableMethodTracing(key, needs_interpreter);
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
void DisableMethodTracing(Thread* self, const char* key)
SHARED_REQUIRES(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- self->TransitionFromRunnableToSuspended(kSuspended);
+ ScopedThreadSuspension sts(self, kSuspended);
runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
instrumentation->DisableMethodTracing(key);
runtime->GetThreadList()->ResumeAll();
- self->TransitionFromSuspendedToRunnable();
}
private:
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 0235067..3ff7017 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -221,13 +221,13 @@
void InternTable::WaitUntilAccessible(Thread* self) {
Locks::intern_table_lock_->ExclusiveUnlock(self);
- self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
- Locks::intern_table_lock_->ExclusiveLock(self);
- while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
- weak_intern_condition_.Wait(self);
+ {
+ ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
+ MutexLock mu(self, *Locks::intern_table_lock_);
+ while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
+ weak_intern_condition_.Wait(self);
+ }
}
- Locks::intern_table_lock_->ExclusiveUnlock(self);
- self->TransitionFromSuspendedToRunnable();
Locks::intern_table_lock_->ExclusiveLock(self);
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index b1c5cf0..1e8326b 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -406,9 +406,9 @@
check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
} else {
// Ensure that we get a native stack trace for this thread.
- self->TransitionFromRunnableToSuspended(kNative);
+ ScopedThreadSuspension sts(self, kNative);
LOG(FATAL) << os.str();
- self->TransitionFromSuspendedToRunnable(); // Unreachable, keep annotalysis happy.
+ UNREACHABLE();
}
}
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 5d21f17..06b67b3 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -623,7 +623,7 @@
CHECK(pReq != nullptr);
/* send request and possibly suspend ourselves */
JDWP::ObjectId thread_self_id = Dbg::GetThreadSelfId();
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
+ ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
if (suspend_policy != SP_NONE) {
AcquireJdwpTokenForEvent(threadId);
}
@@ -633,7 +633,6 @@
ScopedThreadStateChange stsc(self, kSuspended);
SuspendByPolicy(suspend_policy, thread_self_id);
}
- self->TransitionFromSuspendedToRunnable();
}
/*
@@ -1323,9 +1322,8 @@
}
if (safe_to_release_mutator_lock_over_send) {
// Change state to waiting to allow GC, ... while we're sending.
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
+ ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
SendBufferedRequest(type, wrapiov);
- self->TransitionFromSuspendedToRunnable();
} else {
// Send and possibly block GC...
SendBufferedRequest(type, wrapiov);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 7776f8f..0a4d6e3 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -31,6 +31,7 @@
#include "jdwp/jdwp_expand_buf.h"
#include "jdwp/jdwp_priv.h"
#include "runtime.h"
+#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"
@@ -238,9 +239,8 @@
static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
SHARED_REQUIRES(Locks::mutator_lock_) {
Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+ ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
Dbg::SuspendVM();
- self->TransitionFromSuspendedToRunnable();
return ERR_NONE;
}
@@ -922,9 +922,8 @@
}
Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
+ ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
JdwpError result = Dbg::SuspendThread(thread_id);
- self->TransitionFromSuspendedToRunnable();
return result;
}
@@ -1609,7 +1608,7 @@
* Do this after anything that can stall indefinitely.
*/
Thread* self = Thread::Current();
- ThreadState old_state = self->TransitionFromSuspendedToRunnable();
+ ScopedObjectAccess soa(self);
expandBufAddSpace(pReply, kJDWPHeaderLen);
@@ -1670,9 +1669,6 @@
last_activity_time_ms_.StoreSequentiallyConsistent(MilliTime());
}
- /* tell the VM that GC is okay again */
- self->TransitionFromRunnableToSuspended(old_state);
-
return replyLength;
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 1139a1e..668d5dc 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -536,9 +536,8 @@
ddm_is_active_ = false;
/* broadcast the disconnect; must be in RUNNING state */
- thread_->TransitionFromSuspendedToRunnable();
+ ScopedObjectAccess soa(thread_);
Dbg::DdmDisconnected();
- thread_->TransitionFromRunnableToSuspended(kWaitingInMainDebuggerLoop);
}
{
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 95fcb67..fa58418 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -454,15 +454,13 @@
uintptr_t saved_dex_pc = locking_dex_pc_;
locking_dex_pc_ = 0;
- /*
- * Update thread state. If the GC wakes up, it'll ignore us, knowing
- * that we won't touch any references in this state, and we'll check
- * our suspend mode before we transition out.
- */
- self->TransitionFromRunnableToSuspended(why);
-
bool was_interrupted = false;
{
+ // Update thread state. If the GC wakes up, it'll ignore us, knowing
+ // that we won't touch any references in this state, and we'll check
+ // our suspend mode before we transition out.
+ ScopedThreadSuspension sts(self, why);
+
// Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
MutexLock mu(self, *self->GetWaitMutex());
@@ -494,9 +492,6 @@
}
}
- // Set self->status back to kRunnable, and self-suspend if needed.
- self->TransitionFromSuspendedToRunnable();
-
{
// We reset the thread's wait_monitor_ field after transitioning back to runnable so
// that a thread in a waiting/sleeping state has a non-null wait_monitor_ for debugging
@@ -667,9 +662,11 @@
// Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
self->SetMonitorEnterObject(obj.Get());
bool timed_out;
- self->TransitionFromRunnableToSuspended(kBlocked);
- Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
- self->TransitionFromSuspendedToRunnable();
+ Thread* owner;
+ {
+ ScopedThreadSuspension sts(self, kBlocked);
+ owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
+ }
if (owner != nullptr) {
// We succeeded in suspending the thread, check the lock's status didn't change.
lock_word = obj->GetLockWord(true);
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 2e7dbc1..8cd93c6 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -320,14 +320,14 @@
// For use only by the JDWP implementation.
class MonitorInfo {
public:
+ MonitorInfo() = default;
+ MonitorInfo(const MonitorInfo&) = default;
+ MonitorInfo& operator=(const MonitorInfo&) = default;
explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);
Thread* owner_;
size_t entry_count_;
std::vector<Thread*> waiters_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MonitorInfo);
};
} // namespace art
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 69112b1..83e0c0d 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -316,7 +316,7 @@
}
// Need to drop the mutator lock to allow barriers.
- soa.Self()->TransitionFromRunnableToSuspended(kNative);
+ ScopedThreadSuspension sts(soa.Self(), kNative);
ThreadPool thread_pool(pool_name, 3);
thread_pool.AddTask(self, new CreateTask(test, create_sleep, c_millis, c_expected));
if (interrupt) {
@@ -340,7 +340,6 @@
}
thread_pool.StopWorkers(self);
- soa.Self()->TransitionFromSuspendedToRunnable();
}
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 541eeb1..7910f94 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -35,15 +35,16 @@
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
} else {
// Suspend thread to build stack trace.
- soa.Self()->TransitionFromRunnableToSuspended(kNative);
+ ScopedThreadSuspension sts(soa.Self(), kSuspended);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != nullptr) {
// Must be runnable to create returned array.
- CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
- trace = thread->CreateInternalStackTrace<false>(soa);
- soa.Self()->TransitionFromRunnableToSuspended(kNative);
+ {
+ ScopedObjectAccess soa2(soa.Self());
+ trace = thread->CreateInternalStackTrace<false>(soa);
+ }
// Restart suspended thread.
thread_list->Resume(thread, false);
} else {
@@ -52,7 +53,6 @@
"generous timeout.";
}
}
- CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
}
return trace;
}
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b90aa0e..d1cc09a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -31,7 +31,7 @@
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
// ScopedObjectAccess are used to handle the change into Runnable to Get direct access to objects,
// the unchecked variant doesn't aid annotalysis.
-class ScopedThreadStateChange {
+class ScopedThreadStateChange : public ValueObject {
public:
ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
@@ -102,7 +102,7 @@
};
// Assumes we are already runnable.
-class ScopedObjectAccessAlreadyRunnable {
+class ScopedObjectAccessAlreadyRunnable : public ValueObject {
public:
Thread* Self() const {
return self_;
@@ -277,6 +277,30 @@
DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
};
+// Annotalysis helper for going to a suspended state from runnable.
+class ScopedThreadSuspension : public ValueObject {
+ public:
+ explicit ScopedThreadSuspension(Thread* self, ThreadState suspended_state)
+ REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
+ UNLOCK_FUNCTION(Locks::mutator_lock_)
+ ALWAYS_INLINE
+ : self_(self), suspended_state_(suspended_state) {
+ DCHECK(self_ != nullptr);
+ self_->TransitionFromRunnableToSuspended(suspended_state);
+ }
+
+ ~ScopedThreadSuspension() SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
+ DCHECK_EQ(self_->GetState(), suspended_state_);
+ self_->TransitionFromSuspendedToRunnable();
+ }
+
+ private:
+ Thread* const self_;
+ const ThreadState suspended_state_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedThreadSuspension);
+};
+
+
} // namespace art
#endif // ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9929487..af5830a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1035,9 +1035,8 @@
ATRACE_BEGIN("Full suspend check");
// Make thread appear suspended to other threads, release mutator_lock_.
tls32_.suspended_at_suspend_check = true;
- TransitionFromRunnableToSuspended(kSuspended);
- // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
- TransitionFromSuspendedToRunnable();
+ // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
+ ScopedThreadSuspension(this, kSuspended);
tls32_.suspended_at_suspend_check = false;
ATRACE_END();
VLOG(threads) << this << " self-reviving";