blob: ff1ed2a4d27eb063c8f1887c409db9cb3d90cf9c [file] [log] [blame]
Elliott Hughes8daa0922011-09-11 13:46:25 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "thread_list.h"
18
Elliott Hughesabbe07d2012-06-05 17:42:23 -070019#include <dirent.h>
Ian Rogersd9c4fc92013-10-01 19:45:43 -070020#include <ScopedLocalRef.h>
21#include <ScopedUtfChars.h>
Elliott Hughesabbe07d2012-06-05 17:42:23 -070022#include <sys/types.h>
Elliott Hughes038a8062011-09-18 14:12:41 -070023#include <unistd.h>
24
Elliott Hughes76b61672012-12-12 17:47:30 -080025#include "base/mutex.h"
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -070026#include "base/mutex-inl.h"
Sameer Abu Asala8439542013-02-14 16:06:42 -080027#include "base/timing_logger.h"
Elliott Hughes475fc232011-10-25 15:00:35 -070028#include "debugger.h"
Ian Rogersd9c4fc92013-10-01 19:45:43 -070029#include "jni_internal.h"
30#include "lock_word.h"
31#include "monitor.h"
32#include "scoped_thread_state_change.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080033#include "thread.h"
Elliott Hughesabbe07d2012-06-05 17:42:23 -070034#include "utils.h"
Ian Rogersd9c4fc92013-10-01 19:45:43 -070035#include "well_known_classes.h"
Elliott Hughes475fc232011-10-25 15:00:35 -070036
Elliott Hughes8daa0922011-09-11 13:46:25 -070037namespace art {
38
// Constructs the global thread list. No threads are registered yet; the
// suspend-all counters start at zero and thread_exit_cond_ is tied to
// Locks::thread_list_lock_ so WaitForOtherNonDaemonThreadsToExit() can wait on it.
ThreadList::ThreadList()
    : allocated_ids_lock_("allocated thread ids lock"),
      suspend_all_count_(0), debug_suspend_all_count_(0),
      thread_exit_cond_("thread exit condition variable", *Locks::thread_list_lock_) {
  // Sanity check: the largest thread id we hand out must still be encodable
  // in a thin lock word.
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1)));
}
45
// Tears down the thread list during runtime shutdown: detaches the calling
// thread, waits for remaining non-daemon threads to exit, then suspends any
// daemons that are still running.
ThreadList::~ThreadList() {
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  if (Contains(Thread::Current())) {
    Runtime::Current()->DetachCurrentThread();
  }

  WaitForOtherNonDaemonThreadsToExit();
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  //       Thread::Init.
  SuspendAllDaemonThreads();
}
59
60bool ThreadList::Contains(Thread* thread) {
61 return find(list_.begin(), list_.end(), thread) != list_.end();
62}
63
Elliott Hughesabbe07d2012-06-05 17:42:23 -070064bool ThreadList::Contains(pid_t tid) {
Mathieu Chartier02e25112013-08-14 16:14:24 -070065 for (const auto& thread : list_) {
66 if (thread->tid_ == tid) {
Elliott Hughesabbe07d2012-06-05 17:42:23 -070067 return true;
68 }
69 }
70 return false;
71}
72
// Returns the tid of the thread that currently holds thread_list_lock_
// exclusively (as reported by the mutex itself).
pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}
76
// Dumps all threads to |os|: registered threads under the thread list lock,
// then unattached native threads (enumerated from /proc) without the lock.
void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    // Scope the lock to the registered-thread dump only.
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    DumpLocked(os);
  }
  DumpUnattachedThreads(os);
}
84
// Dumps the state and kernel stack of a native thread |tid| that is not
// attached to the runtime (so there is no Thread* for it).
static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a NULL thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, NULL, tid);
  DumpKernelStack(os, tid, " kernel: ", false);
  // TODO: Reenable this when the native code in system_server can handle it.
  // Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
  if (false) {
    DumpNativeStack(os, tid, " native: ", false);
  }
  os << "\n";
}
97
// Dumps threads that belong to this process but are not attached to the
// runtime: walks /proc/self/task and dumps every numeric entry whose tid is
// not present in list_.
void ThreadList::DumpUnattachedThreads(std::ostream& os) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    // Best effort: silently skip if /proc isn't available.
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != NULL) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {  // Name parsed fully as a number, i.e. a tid (skips "." and "..").
      bool contains;
      {
        // Hold the lock only for the membership check, not while dumping.
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid);
      }
    }
  }
  closedir(d);
}
122
123void ThreadList::DumpLocked(std::ostream& os) {
Elliott Hughes8daa0922011-09-11 13:46:25 -0700124 os << "DALVIK THREADS (" << list_.size() << "):\n";
Mathieu Chartier02e25112013-08-14 16:14:24 -0700125 for (const auto& thread : list_) {
126 thread->Dump(os);
Elliott Hughes8daa0922011-09-11 13:46:25 -0700127 os << "\n";
128 }
129}
130
// Debug check that every registered thread other than |ignore1| and |ignore2|
// is suspended; aborts with a dump of the offending thread otherwise. Holds
// both the thread list and suspend count locks for a consistent snapshot.
void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
            << "\nUnsuspended thread: <<" << *thread << "\n"
            << "self: <<" << *Thread::Current();
    }
  }
}
142
#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
// NOTE: called after a suspend-all timeout, so lock state is already abnormal;
// thread safety analysis is deliberately disabled.
static void UnsafeLogFatalForThreadSuspendAllTimeout(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  runtime->DumpLockHolders(ss);
  ss << "\n";
  runtime->GetThreadList()->DumpLocked(ss);
  LOG(FATAL) << ss.str();  // Never returns.
}
#endif
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700155
// Runs |checkpoint_function| once per registered thread. Runnable threads are
// asked (via RequestCheckpoint) to run it themselves soon; threads that are
// suspended get their suspend count raised here, have the checkpoint run on
// their behalf, and are then released. Returns the number of threads the
// checkpoint ran (or will run) for, including the caller.
size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  if (kIsDebugBuild) {
    // The caller must not hold any of the locks we take below, and must not
    // be runnable (it would block SuspendAll-style waiting).
    Locks::mutator_lock_->AssertNotExclusiveHeld(self);
    Locks::thread_list_lock_->AssertNotHeld(self);
    Locks::thread_suspend_count_lock_->AssertNotHeld(self);
    CHECK_NE(self->GetState(), kRunnable);
  }

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread, threads which are suspend get their checkpoint
    // manually called.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            count++;
            break;
          } else {
            // We are probably suspended, try to make sure that we stay suspended.
            // The thread switched back to runnable.
            if (thread->GetState() == kRunnable) {
              // Spurious fail, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      // Wait until the thread is suspended.
      uint64_t start = NanoTime();
      do {
        // Sleep for 100us.
        usleep(100);
      } while (!thread->IsSuspended());
      uint64_t end = NanoTime();
      // Shouldn't need to wait for longer than 1 millisecond.
      const uint64_t threshold = 1;
      if (NsToMs(end - start) > threshold) {
        LOG(INFO) << "Warning: waited longer than " << threshold
                  << " ms for thread suspend\n";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, false);
    }
  }

  {
    // Imitate ResumeAll, threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now the suspend_count_ is lowered so we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  // Add one for self.
  return count + suspended_count_modified_threads.size() + 1;
}
233
// Suspends every thread except the caller: raises each thread's suspend count
// under the suspend count lock, then blocks acquiring the mutator lock
// exclusively until all runnable threads have released their shared access.
// Pairs with ResumeAll().
void ThreadList::SuspendAll() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " SuspendAll starting...";

  if (kIsDebugBuild) {
    // The caller must not already hold any of the locks used below.
    Locks::mutator_lock_->AssertNotHeld(self);
    Locks::thread_list_lock_->AssertNotHeld(self);
    Locks::thread_suspend_count_lock_->AssertNotHeld(self);
    CHECK_NE(self->GetState(), kRunnable);
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      ++suspend_all_count_;
      // Increment everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self) {
          continue;
        }
        VLOG(threads) << "requesting thread suspend: " << *thread;
        thread->ModifySuspendCount(self, +1, false);
      }
    }
  }

  // Block on the mutator lock until all Runnable threads release their share of access.
#if HAVE_TIMED_RWLOCK
  // Timeout if we wait more than 30 seconds.
  if (UNLIKELY(!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0))) {
    UnsafeLogFatalForThreadSuspendAllTimeout(self);
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
#endif

  // Debug check that all threads are suspended.
  AssertThreadsAreSuspended(self, self);

  VLOG(threads) << *self << " SuspendAll complete";
}
277
// Undoes SuspendAll(): releases exclusive ownership of the mutator lock,
// lowers every other thread's suspend count, and wakes threads blocked on
// Thread::resume_cond_.
void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " ResumeAll starting";

  // Debug check that all threads are suspended.
  AssertThreadsAreSuspended(self, self);

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    VLOG(threads) << *self << " ResumeAll waking others";
    Thread::resume_cond_->Broadcast(self);
  }
  VLOG(threads) << *self << " ResumeAll complete";
}
307
// Resumes a single suspended |thread| by decrementing its suspend count
// (the debug count too when |for_debugger|) and broadcasting on resume_cond_.
// Silently returns if the thread is no longer on the list.
void ThreadList::Resume(Thread* thread, bool for_debugger) {
  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);  // A thread cannot Resume() itself.
  VLOG(threads) << "Resume(" << *thread << ") starting..." << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      return;
    }
    thread->ModifySuspendCount(self, -1, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << *thread << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << *thread << ") complete";
}
Elliott Hughes01158d72011-09-19 19:47:10 -0700333
// Logs |message| at |level| about the thread whose java.lang.Thread peer is
// |peer|, appending the peer's name when it can be read via JNI.
static void ThreadSuspendByPeerWarning(Thread* self, int level, const char* message, jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, (jstring)env->GetObjectField(peer,
                                                          WellKnownClasses::java_lang_Thread_name));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == NULL) {
    // Couldn't read the name; log without it and clear any pending exception
    // from the failed read.
    LOG(level) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(level) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}
347
348// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
349// individual thread requires polling. delay_us is the requested sleep and total_delay_us
350// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
351// subsequently sleeps increase delay_us from 1ms to 500ms by doubling.
352static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
353 for (int i = kLockLevelCount - 1; i >= 0; --i) {
354 BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
355 if (held_mutex != NULL) {
356 LOG(FATAL) << "Holding " << held_mutex->GetName() << " while sleeping for thread suspension";
357 }
358 }
359 {
360 useconds_t new_delay_us = (*delay_us) * 2;
361 CHECK_GE(new_delay_us, *delay_us);
362 if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
363 *delay_us = new_delay_us;
364 }
365 }
366 if ((*delay_us) == 0) {
367 sched_yield();
368 // Default to 1 milliseconds (note that this gets multiplied by 2 before the first sleep).
369 (*delay_us) = 500;
370 } else {
371 usleep(*delay_us);
372 (*total_delay_us) += (*delay_us);
373 }
374}
375
376Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
377 bool debug_suspension, bool* timed_out) {
378 static const useconds_t kTimeoutUs = 30 * 1000000; // 30s.
379 useconds_t total_delay_us = 0;
380 useconds_t delay_us = 0;
381 bool did_suspend_request = false;
382 *timed_out = false;
383 Thread* self = Thread::Current();
384 while (true) {
385 Thread* thread;
386 {
387 ScopedObjectAccess soa(self);
388 MutexLock mu(self, *Locks::thread_list_lock_);
389 thread = Thread::FromManagedThread(soa, peer);
390 if (thread == NULL) {
391 ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
392 return NULL;
393 }
394 {
395 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
396 if (request_suspension) {
397 thread->ModifySuspendCount(self, +1, debug_suspension);
398 request_suspension = false;
399 did_suspend_request = true;
400 } else {
401 // If the caller isn't requesting suspension, a suspension should have already occurred.
402 CHECK_GT(thread->GetSuspendCount(), 0);
403 }
404 // IsSuspended on the current thread will fail as the current thread is changed into
405 // Runnable above. As the suspend count is now raised if this is the current thread
406 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
407 // to just explicitly handle the current thread in the callers to this code.
408 CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
409 // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
410 // count, or else we've waited and it has self suspended) or is the current thread, we're
411 // done.
412 if (thread->IsSuspended()) {
413 return thread;
414 }
415 if (total_delay_us >= kTimeoutUs) {
416 ThreadSuspendByPeerWarning(self, ERROR, "Thread suspension timed out", peer);
417 if (did_suspend_request) {
418 thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
419 }
420 *timed_out = true;
421 return NULL;
422 }
423 }
424 // Release locks and come out of runnable state.
425 }
426 ThreadSuspendSleep(self, &delay_us, &total_delay_us);
427 }
428}
429
// Logs |message| at |level| for the thread with the given thin-lock thread id.
// NOTE(review): "%d" is paired with a uint32_t argument; ids above INT_MAX
// would print negative — confirm ids stay small before relying on the output.
static void ThreadSuspendByThreadIdWarning(int level, const char* message, uint32_t thread_id) {
  LOG(level) << StringPrintf("%s: %d", message, thread_id);
}
433
434Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension,
435 bool* timed_out) {
436 static const useconds_t kTimeoutUs = 30 * 1000000; // 30s.
437 useconds_t total_delay_us = 0;
438 useconds_t delay_us = 0;
439 bool did_suspend_request = false;
440 *timed_out = false;
441 Thread* self = Thread::Current();
442 CHECK_NE(thread_id, kInvalidThreadId);
443 while (true) {
444 Thread* thread = NULL;
445 {
446 ScopedObjectAccess soa(self);
447 MutexLock mu(self, *Locks::thread_list_lock_);
448 for (const auto& it : list_) {
449 if (it->GetThreadId() == thread_id) {
450 thread = it;
451 break;
452 }
453 }
454 if (thread == NULL) {
455 // There's a race in inflating a lock and the owner giving up ownership and then dying.
456 ThreadSuspendByThreadIdWarning(WARNING, "No such thread id for suspend", thread_id);
457 return NULL;
458 }
459 {
460 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
461 if (!did_suspend_request) {
462 thread->ModifySuspendCount(self, +1, debug_suspension);
463 did_suspend_request = true;
464 } else {
465 // If the caller isn't requesting suspension, a suspension should have already occurred.
466 CHECK_GT(thread->GetSuspendCount(), 0);
467 }
468 // IsSuspended on the current thread will fail as the current thread is changed into
469 // Runnable above. As the suspend count is now raised if this is the current thread
470 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
471 // to just explicitly handle the current thread in the callers to this code.
472 CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
473 // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
474 // count, or else we've waited and it has self suspended) or is the current thread, we're
475 // done.
476 if (thread->IsSuspended()) {
477 return thread;
478 }
479 if (total_delay_us >= kTimeoutUs) {
480 ThreadSuspendByThreadIdWarning(ERROR, "Thread suspension timed out", thread_id);
481 if (did_suspend_request) {
482 thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
483 }
484 *timed_out = true;
485 return NULL;
486 }
487 }
488 // Release locks and come out of runnable state.
489 }
490 ThreadSuspendSleep(self, &delay_us, &total_delay_us);
491 }
492}
493
494Thread* ThreadList::FindThreadByThreadId(uint32_t thin_lock_id) {
495 Thread* self = Thread::Current();
496 MutexLock mu(self, *Locks::thread_list_lock_);
497 for (const auto& thread : list_) {
498 if (thread->GetThreadId() == thin_lock_id) {
499 CHECK(thread == self || thread->IsSuspended());
500 return thread;
501 }
502 }
503 return NULL;
504}
505
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700506void ThreadList::SuspendAllForDebugger() {
507 Thread* self = Thread::Current();
508 Thread* debug_thread = Dbg::GetDebugThread();
509
510 VLOG(threads) << *self << " SuspendAllForDebugger starting...";
511
512 {
Ian Rogers81d425b2012-09-27 16:03:43 -0700513 MutexLock mu(self, *Locks::thread_list_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700514 {
Ian Rogers81d425b2012-09-27 16:03:43 -0700515 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700516 // Update global suspend all state for attaching threads.
517 ++suspend_all_count_;
518 ++debug_suspend_all_count_;
519 // Increment everybody's suspend count (except our own).
Mathieu Chartier02e25112013-08-14 16:14:24 -0700520 for (const auto& thread : list_) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700521 if (thread == self || thread == debug_thread) {
522 continue;
523 }
524 VLOG(threads) << "requesting thread suspend: " << *thread;
Ian Rogers01ae5802012-09-28 16:14:01 -0700525 thread->ModifySuspendCount(self, +1, true);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700526 }
527 }
528 }
529
Ian Rogers66aee5c2012-08-15 17:17:47 -0700530 // Block on the mutator lock until all Runnable threads release their share of access then
531 // immediately unlock again.
532#if HAVE_TIMED_RWLOCK
533 // Timeout if we wait more than 30 seconds.
Ian Rogersc604d732012-10-14 16:09:54 -0700534 if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
Ian Rogers81d425b2012-09-27 16:03:43 -0700535 UnsafeLogFatalForThreadSuspendAllTimeout(self);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700536 } else {
Ian Rogers81d425b2012-09-27 16:03:43 -0700537 Locks::mutator_lock_->ExclusiveUnlock(self);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700538 }
Ian Rogers66aee5c2012-08-15 17:17:47 -0700539#else
Ian Rogers81d425b2012-09-27 16:03:43 -0700540 Locks::mutator_lock_->ExclusiveLock(self);
541 Locks::mutator_lock_->ExclusiveUnlock(self);
Ian Rogers66aee5c2012-08-15 17:17:47 -0700542#endif
Ian Rogers50b35e22012-10-04 10:09:15 -0700543 AssertThreadsAreSuspended(self, self, debug_thread);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700544
545 VLOG(threads) << *self << " SuspendAll complete";
Elliott Hughes01158d72011-09-19 19:47:10 -0700546}
547
// Suspends the calling thread on behalf of the debugger: raises its own
// suspend count, notifies JDWP that suspension is complete, then waits on
// resume_cond_ until the count returns to zero.
void ThreadList::SuspendSelfForDebugger() {
  Thread* self = Thread::Current();

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(debug_thread != NULL);
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  {
    // Collisions with other suspends aren't really interesting. We want
    // to ensure that we're the only one fiddling with the suspend count
    // though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    self->ModifySuspendCount(self, +1, true);
    CHECK_GT(self->suspend_count_, 0);
  }

  VLOG(threads) << *self << " self-suspending (debugger)";

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->suspend_count_ != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->suspend_count_ != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        LOG(DEBUG) << *self << " still suspended after undo "
                   << "(suspend count=" << self->suspend_count_ << ")";
      }
    }
    CHECK_EQ(self->suspend_count_, 0);
  }

  VLOG(threads) << *self << " self-reviving (debugger)";
}
592
// Reverts all debugger-initiated suspensions: subtracts the accumulated
// debug suspend counts from the global state and from each suspended thread,
// then wakes any threads waiting on resume_cond_.
void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->debug_suspend_count_ == 0) {
        continue;
      }
      thread->ModifySuspendCount(self, -thread->debug_suspend_count_, true);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}
620
// Blocks until every registered thread other than the caller is a daemon,
// waiting on thread_exit_cond_ (signalled when a thread exits) between checks.
// Only valid during runtime shutdown, when no new threads can be born.
void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  bool all_threads_are_daemons;
  do {
    {
      // No more threads can be born after we start to shutdown.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDown());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    all_threads_are_daemons = true;
    MutexLock mu(self, *Locks::thread_list_lock_);
    for (const auto& thread : list_) {
      if (thread != self && !thread->IsDaemon()) {
        all_threads_are_daemons = false;
        break;
      }
    }
    if (!all_threads_are_daemons) {
      // Wait for another thread to exit before re-checking.
      thread_exit_cond_.Wait(self);
    }
  } while (!all_threads_are_daemons);
}
646
// During shutdown, raises the suspend count of every remaining (daemon)
// thread, then polls for up to ~2 seconds (10 x 200ms) for them to leave the
// runnable state, logging a warning for slow threads and an error on failure.
void ThreadList::SuspendAllDaemonThreads() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::thread_list_lock_);
  {  // Tell all the daemons it's time to suspend.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        thread->ModifySuspendCount(self, +1, false);
      }
    }
  }
  // Give the threads a chance to suspend, complaining if they're slow.
  bool have_complained = false;
  for (int i = 0; i < 10; ++i) {
    usleep(200 * 1000);  // 200ms per poll.
    bool all_suspended = true;
    for (const auto& thread : list_) {
      if (thread != self && thread->GetState() == kRunnable) {
        if (!have_complained) {
          LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
          have_complained = true;
        }
        all_suspended = false;
      }
    }
    if (all_suspended) {
      return;
    }
  }
  LOG(ERROR) << "suspend all daemons failed";
}
// Adds the calling thread to the thread list and synchronizes its suspend
// counts with any SuspendAll/debugger suspension already in progress, so a
// freshly attached thread cannot slip past an ongoing suspend-all.
void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss;
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  // Inherit the global suspend-all counters so this thread starts out as
  // suspended as its peers.
  self->suspend_count_ = suspend_all_count_;
  self->debug_suspend_count_ = debug_suspend_all_count_;
  if (self->suspend_count_ > 0) {
    // A suspend-all is active: make this thread stop at its next suspend check.
    self->AtomicSetFlag(kSuspendRequest);
  }
  // A thread must never be registered twice.
  CHECK(!Contains(self));
  list_.push_back(self);
}
702
// Removes the calling thread from the thread list, deletes its Thread object,
// clears its TLS slot, and signals thread_exit_cond_ so waiters (e.g.
// WaitForOtherNonDaemonThreadsToExit) can re-check. After the delete, 'self'
// is dangling; the code below is careful never to dereference it again.
void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread.
  self->Destroy();

  // Return this thread's thin-lock id to the pool; 0 marks the thread as
  // having no valid id from here on.
  uint32_t thin_lock_id = self->thin_lock_thread_id_;
  self->thin_lock_thread_id_ = 0;
  ReleaseThreadId(self, thin_lock_id);
  // Loop until we manage to remove and delete self; each iteration re-checks
  // because a suspender may be holding a reference to this Thread.
  while (self != NULL) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    Locks::thread_list_lock_->ExclusiveLock(self);
    CHECK(Contains(self));
    // Note: we don't take the thread_suspend_count_lock_ here as to be suspending a thread other
    // than yourself you need to hold the thread_list_lock_ (see Thread::ModifySuspendCount).
    if (!self->IsSuspended()) {
      // Safe to delete: nobody can be suspending us while we hold the list lock.
      list_.remove(self);
      delete self;
      self = NULL;  // Terminates the loop; also what ExclusiveUnlock receives below.
    }
    Locks::thread_list_lock_->ExclusiveUnlock(self);
  }

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self");

  // Signal that a thread just detached.
  // NULL is passed as the Thread* because 'self' has been deleted above.
  MutexLock mu(NULL, *Locks::thread_list_lock_);
  thread_exit_cond_.Signal(NULL);
}
739
740void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
Mathieu Chartier02e25112013-08-14 16:14:24 -0700741 for (const auto& thread : list_) {
742 callback(thread, context);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700743 }
744}
745
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800746void ThreadList::VisitRoots(RootVisitor* visitor, void* arg) const {
Ian Rogers81d425b2012-09-27 16:03:43 -0700747 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
Mathieu Chartier02e25112013-08-14 16:14:24 -0700748 for (const auto& thread : list_) {
749 thread->VisitRoots(visitor, arg);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700750 }
Elliott Hughes038a8062011-09-18 14:12:41 -0700751}
752
Mathieu Chartier423d2a32013-09-12 17:33:56 -0700753struct VerifyRootWrapperArg {
754 VerifyRootVisitor* visitor;
755 void* arg;
756};
757
758static mirror::Object* VerifyRootWrapperCallback(mirror::Object* root, void* arg) {
759 VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
760 wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
761 return root;
762}
763
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800764void ThreadList::VerifyRoots(VerifyRootVisitor* visitor, void* arg) const {
Mathieu Chartier423d2a32013-09-12 17:33:56 -0700765 VerifyRootWrapperArg wrapper;
766 wrapper.visitor = visitor;
767 wrapper.arg = arg;
Mathieu Chartier6f1c9492012-10-15 12:08:41 -0700768 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
Mathieu Chartier02e25112013-08-14 16:14:24 -0700769 for (const auto& thread : list_) {
Mathieu Chartier423d2a32013-09-12 17:33:56 -0700770 thread->VisitRoots(VerifyRootWrapperCallback, &wrapper);
Mathieu Chartier6f1c9492012-10-15 12:08:41 -0700771 }
772}
773
Ian Rogerscfaa4552012-11-26 21:00:08 -0800774uint32_t ThreadList::AllocThreadId(Thread* self) {
775 MutexLock mu(self, allocated_ids_lock_);
Elliott Hughes8daa0922011-09-11 13:46:25 -0700776 for (size_t i = 0; i < allocated_ids_.size(); ++i) {
777 if (!allocated_ids_[i]) {
778 allocated_ids_.set(i);
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700779 return i + 1; // Zero is reserved to mean "invalid".
Elliott Hughes8daa0922011-09-11 13:46:25 -0700780 }
781 }
782 LOG(FATAL) << "Out of internal thread ids";
783 return 0;
784}
785
Ian Rogerscfaa4552012-11-26 21:00:08 -0800786void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
787 MutexLock mu(self, allocated_ids_lock_);
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700788 --id; // Zero is reserved to mean "invalid".
Elliott Hughes8daa0922011-09-11 13:46:25 -0700789 DCHECK(allocated_ids_[id]) << id;
790 allocated_ids_.reset(id);
791}
792
Elliott Hughes8daa0922011-09-11 13:46:25 -0700793} // namespace art