/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#include <backtrace/BacktraceMap.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
#include <ScopedUtfChars.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "trace.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
static constexpr uint64_t kThreadSuspendTimeoutMs = 30 * 1000;  // 30s.
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;

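// Together these constants implement the suspend-polling backoff used below: callers start by
// yielding (a sleep of 0) and, once a suspend request has been pending for longer than
// kThreadSuspendMaxYieldUs, fall back to real sleeps that double up to kThreadSuspendMaxSleepUs.
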
// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
// some history.
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStack = false;

ThreadList::ThreadList()
    : suspend_all_count_(0),
      debug_suspend_all_count_(0),
      unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64),
      long_suspend_(false) {
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // Disable GC and wait for GC to complete in case there are still daemon threads doing
  // allocations.
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->DisableGCForShutdown();
  // In case a GC is in progress, wait for it to finish.
  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  // Thread::Init.
  SuspendAllDaemonThreadsForShutdown();
}

bool ThreadList::Contains(Thread* thread) {
  return find(list_.begin(), list_.end(), thread) != list_.end();
}

bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
  Dump(os, dump_native_stack);
  DumpUnattachedThreads(os, dump_native_stack);
}

static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  if (dump_native_stack && kDumpUnattachedThreadNativeStack) {
    DumpNativeStack(os, tid, nullptr, "  native: ");
  }
  os << "\n";
}

void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid, dump_native_stack);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. A larger value is used on the target, since the device
// could be overloaded with ANR dumps.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;

// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
      : os_(os),
        barrier_(0),
        backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
        dump_native_stack_(dump_native_stack) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
    }
    local_os << "\n";
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str();
    }
    barrier_.Pass(self);
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
          << "Unexpected time out during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
  // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
  std::unique_ptr<BacktraceMap> backtrace_map_;
  // Whether we should dump the native stack.
  const bool dump_native_stack_;
};

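// Dumps every attached thread by running a DumpCheckpoint on each of them: runnable threads dump
// themselves when they hit their next checkpoint, already-suspended threads are dumped on their
// behalf by the requesting thread, and the checkpoint's barrier is used to wait for all dumps.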
void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  DumpCheckpoint checkpoint(&os, dump_native_stack);
  size_t threads_running_checkpoint;
  {
    // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
    ScopedObjectAccess soa(Thread::Current());
    threads_running_checkpoint = RunCheckpoint(&checkpoint);
  }
  if (threads_running_checkpoint != 0) {
    checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
            << "\nUnsuspended thread: <<" << *thread << "\n"
            << "self: <<" << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep time. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
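// A minimal sketch of how callers drive this helper (see SuspendThreadByPeer and
// SuspendThreadByThreadId below for the real loops, which add timeouts and locking):
//
//   useconds_t sleep_us = kThreadSuspendInitialSleepUs;  // 0: yield at first.
//   const uint64_t start_time = NanoTime();
//   while (!thread->IsSuspended()) {
//     if (sleep_us == 0 &&
//         NanoTime() - start_time > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
//       sleep_us = kThreadSuspendMaxYieldUs / 2;  // Stop yielding, start sleeping.
//     }
//     ThreadSuspendSleep(sleep_us);
//     sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);  // Exponential backoff.
//   }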
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}

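// Runs checkpoint_function on every thread in the list: runnable threads get a checkpoint request
// and run it themselves shortly afterwards, while already-suspended threads have their suspend
// count raised and the checkpoint run on their behalf by this thread. The optional callback runs
// while the thread_list_lock_ and thread_suspend_count_lock_ are still held. Returns the number
// of threads in the list at the time of the request.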
size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread; threads which are suspended get their
    // checkpoint manually called.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    count = list_.size();
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            break;
          } else {
            // We are probably suspended, try to make sure that we stay suspended.
            // The thread switched back to runnable.
            if (thread->GetState() == kRunnable) {
              // Spurious fail, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, nullptr, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
    // Run the callback to be called inside this critical section.
    if (callback != nullptr) {
      callback->Run(self);
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
            << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }

  {
    // Imitate ResumeAll, threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now the suspend_count_ is lowered so we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}

// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          count++;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
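// A rough outline of the phases below: (1) suspend all threads; (2) run flip_callback with the
// mutator lock held exclusively; (3) install thread_flip_visitor on every thread and resume the
// threads that are (about to become) runnable, which run the visitor on themselves; (4) run the
// visitor on behalf of the remaining suspended threads, then resume them as well.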
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
                                   Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  const uint64_t start_time = NanoTime();
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.

  SuspendAllInternal(self, self, nullptr);

  // Run the flip callback for the collector.
  Locks::mutator_lock_->ExclusiveLock(self);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - start_time);

  // Resume runnable threads.
  size_t runnable_thread_count = 0;
  std::vector<Thread*> other_threads;
  {
    TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked by
      // a checkpoint) may cause the flip function to be run for a runnable/suspended thread before
      // a runnable thread runs it for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread == self) {
        continue;
      }
      // Resume early the threads that were runnable but are suspended just for this thread flip or
      // are about to transition from non-runnable (e.g. kNative at the SOA entry in a JNI function)
      // to runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or are
      // waiting for the thread flip to end at the JNI critical section entry
      // (kWaitingForGcThreadFlip).
      ThreadState state = thread->GetState();
      if (state == kWaitingForGcThreadFlip ||
          thread->IsTransitioningToRunnable()) {
        // The thread will resume right after the broadcast.
        thread->ModifySuspendCount(self, -1, nullptr, false);
        ++runnable_thread_count;
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  collector->GetHeap()->ThreadFlipEnd(self);

  // Run the closure on the other threads and let them resume.
  {
    TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    Closure* flip_func = self->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(self);
    }
  }

  // Resume other threads.
  {
    TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_thread_count + other_threads.size() + 1;  // +1 for self.
}

void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  {
    ScopedTrace trace("Suspending mutator threads");
    const uint64_t start_time = NanoTime();

    SuspendAllInternal(self, self);
    // All threads are known to have suspended (but a thread may still own the mutator lock).
    // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
#if HAVE_TIMED_RWLOCK
    while (true) {
      if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
        break;
      } else if (!long_suspend_) {
        // Reading long_suspend_ without the mutator lock is slightly racy; in some rare cases this
        // could result in a thread suspend timeout.
        // Timeout if we wait more than kThreadSuspendTimeoutMs milliseconds.
        UnsafeLogFatalForThreadSuspendAllTimeout();
      }
    }
#else
    Locks::mutator_lock_->ExclusiveLock(self);
#endif

    long_suspend_ = long_suspend;

    const uint64_t end_time = NanoTime();
    const uint64_t suspend_time = end_time - start_time;
    suspend_all_historam_.AdjustAndAddValue(suspend_time);
    if (suspend_time > kLongThreadSuspendThreshold) {
      LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
    }

    if (kDebugLocking) {
      // Debug check that all threads are suspended.
      AssertThreadsAreSuspended(self, self);
    }
  }
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}

// Ensures all threads running Java suspend and that those not running Java don't start.
// The debugger thread might be set to kRunnable for a short period of time after
// SuspendAllInternal. This is safe because it will be set back to a suspended state before
// SuspendAll returns.
void ThreadList::SuspendAllInternal(Thread* self,
                                    Thread* ignore1,
                                    Thread* ignore2,
                                    bool debug_suspend) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  // First request that all threads suspend, then wait for them to suspend before
  // returning. This suspension scheme also relies on other behaviour:
  // 1. Threads cannot be deleted while they are suspended or have a suspend-
  //    request flag set - (see Unregister() below).
  // 2. When threads are created, they are created in a suspended state (actually
  //    kNative) and will never begin executing Java code without first checking
  //    the suspend-request flag.
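  // The "wait" half is implemented with a suspend barrier: pending_threads below counts the
  // threads that still need to suspend, ModifySuspendCount(..., &pending_threads, ...) registers
  // the counter with each target thread, and each target decrements it (waking us via futex where
  // available) from Thread::PassActiveSuspendBarriers() once it has suspended.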

  // The atomic counter for number of threads that need to pass the barrier.
  AtomicInteger pending_threads;
  uint32_t num_ignored = 0;
  if (ignore1 != nullptr) {
    ++num_ignored;
  }
  if (ignore2 != nullptr && ignore1 != ignore2) {
    ++num_ignored;
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    if (debug_suspend)
      ++debug_suspend_all_count_;
    pending_threads.StoreRelaxed(list_.size() - num_ignored);
    // Increment everybody's suspend count (except those that should be ignored).
    for (const auto& thread : list_) {
      if (thread == ignore1 || thread == ignore2) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      while (true) {
        if (LIKELY(thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend))) {
          break;
        } else {
          // Failure means the list of active_suspend_barriers is full; we should release the
          // thread_suspend_count_lock_ (to avoid deadlock) and wait until the target thread has
          // executed Thread::PassActiveSuspendBarriers(). Note that we cannot simply wait for
          // the thread to change to a suspended state, because it might need to run a checkpoint
          // function before the state change, which also needs thread_suspend_count_lock_.

          // This is very unlikely to happen since it requires more than kMaxSuspendBarriers
          // threads to execute SuspendAllInternal() simultaneously while the target thread
          // stays in kRunnable in the meantime.
          Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
          NanoSleep(100000);
          Locks::thread_suspend_count_lock_->ExclusiveLock(self);
        }
      }

      // Must install the pending_threads counter first, then check thread->IsSuspended() and
      // clear the counter. Otherwise there's a race with Thread::TransitionFromRunnableToSuspended()
      // that can lead a thread to miss a call to PassActiveSuspendBarriers().
      if (thread->IsSuspended()) {
        // Only clear the counter for the current thread.
        thread->ClearSuspendBarrier(&pending_threads);
        pending_threads.FetchAndSubSequentiallyConsistent(1);
      }
    }
  }

  // Wait for the barrier to be passed by all runnable threads. This wait
  // is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
  timespec wait_timeout;
  InitTimeSpec(true, CLOCK_MONOTONIC, 10000, 0, &wait_timeout);
#endif
  while (true) {
    int32_t cur_val = pending_threads.LoadRelaxed();
    if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          if (errno == ETIMEDOUT) {
            LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
                << "Unexpected time out during suspend all.";
          } else {
            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
          }
        }
      }  // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
#else
      // Spin wait. This is likely to be slow, but on most architectures ART_USE_FUTEXES is set.
#endif
    } else {
      CHECK_EQ(cur_val, 0);
      break;
    }
  }
}

void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();

  ScopedTrace trace("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

void ThreadList::Resume(Thread* thread, bool for_debugger) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
      << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      // We only expect threads within the thread list to have been suspended; otherwise we can't
      // stop such threads from deleting themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
          << ") thread not within thread list";
      return;
    }
    thread->ModifySuspendCount(self, -1, nullptr, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}

static void ThreadSuspendByPeerWarning(Thread* self,
                                       LogSeverity severity,
                                       const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name)));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}

Thread* ThreadList::SuspendThreadByPeer(jobject peer,
                                        bool request_suspension,
                                        bool debug_suspension,
                                        bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* const self = Thread::Current();
  Thread* suspended_thread = nullptr;
  VLOG(threads) << "SuspendThreadByPeer starting";
  while (true) {
    Thread* thread;
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each
      // other's suspension.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == nullptr) {
        if (suspended_thread != nullptr) {
          MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
          // If we incremented the suspend count but the thread reset its peer, we need to
          // re-decrement it since it is shutting down and may deadlock the runtime in
          // ThreadList::WaitForOtherNonDaemonThreadsToExit.
          suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
        }
        ThreadSuspendByPeerWarning(self,
                                   ::android::base::WARNING,
                                   "No such thread for suspend",
                                   peer);
        return nullptr;
      }
      if (!Contains(thread)) {
        CHECK(suspended_thread == nullptr);
        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
            << reinterpret_cast<void*>(thread);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          CHECK(suspended_thread == nullptr);
          suspended_thread = thread;
          suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          request_suspension = false;
        } else {
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread
        // it will self-suspend on transition to Runnable, making it hard to work with. It's
        // simpler to just explicitly handle the current thread in the callers of this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                      peer).c_str());
          }
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByPeerWarning(self,
                                     ::android::base::FATAL,
                                     "Thread suspension timed out",
                                     peer);
          if (suspended_thread != nullptr) {
            CHECK_EQ(suspended_thread, thread);
            suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    // This may stay at 0 if sleep_us == 0, but this is WAI since we want to avoid using usleep at
    // all if possible. This shouldn't be an issue since time to suspend should always be small.
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
                                           const char* message,
                                           uint32_t thread_id) {
  LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}

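// Like SuspendThreadByPeer, but identifies the target by its thread id (the id used in thin lock
// words), re-resolving the id against the thread list on every retry since the target may exit in
// the meantime. The first successful iteration always requests the suspension; there is no
// request_suspension parameter here.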
Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
                                            bool debug_suspension,
                                            bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* suspended_thread = nullptr;
  Thread* const self = Thread::Current();
  CHECK_NE(thread_id, kInvalidThreadId);
  VLOG(threads) << "SuspendThreadByThreadId starting";
  while (true) {
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each
      // other's suspension.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      Thread* thread = nullptr;
      for (const auto& it : list_) {
        if (it->GetThreadId() == thread_id) {
          thread = it;
          break;
        }
      }
      if (thread == nullptr) {
        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
            << " no longer in thread list";
        // There's a race between inflating a lock and the owner giving up ownership and then
        // dying.
        ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                       "No such thread id for suspend",
                                       thread_id);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
      DCHECK(Contains(thread));
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (suspended_thread == nullptr) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          suspended_thread = thread;
        } else {
          CHECK_EQ(suspended_thread, thread);
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread
        // it will self-suspend on transition to Runnable, making it hard to work with. It's
        // simpler to just explicitly handle the current thread in the callers of this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                      name.c_str(), thread_id).c_str());
          }
          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                         "Thread suspension timed out",
                                         thread_id);
          if (suspended_thread != nullptr) {
            thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
  for (const auto& thread : list_) {
    if (thread->GetThreadId() == thread_id) {
      return thread;
    }
  }
  return nullptr;
}

Ian Rogers00f7d0e2012-07-19 15:28:27 -0700967void ThreadList::SuspendAllForDebugger() {
968 Thread* self = Thread::Current();
969 Thread* debug_thread = Dbg::GetDebugThread();
970
971 VLOG(threads) << *self << " SuspendAllForDebugger starting...";
972
Yu Lieac44242015-06-29 10:50:03 +0800973 SuspendAllInternal(self, self, debug_thread, true);
Ian Rogers66aee5c2012-08-15 17:17:47 -0700974 // Block on the mutator lock until all Runnable threads release their share of access then
975 // immediately unlock again.
976#if HAVE_TIMED_RWLOCK
977 // Timeout if we wait more than 30 seconds.
Ian Rogersc604d732012-10-14 16:09:54 -0700978 if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
Sebastien Hertzbae182c2013-12-17 10:42:03 +0100979 UnsafeLogFatalForThreadSuspendAllTimeout();
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700980 } else {
Ian Rogers81d425b2012-09-27 16:03:43 -0700981 Locks::mutator_lock_->ExclusiveUnlock(self);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700982 }
Ian Rogers66aee5c2012-08-15 17:17:47 -0700983#else
Ian Rogers81d425b2012-09-27 16:03:43 -0700984 Locks::mutator_lock_->ExclusiveLock(self);
985 Locks::mutator_lock_->ExclusiveUnlock(self);
Ian Rogers66aee5c2012-08-15 17:17:47 -0700986#endif
Mathieu Chartier9450c6c2015-11-07 11:55:23 -0800987 // Disabled for the following race condition:
988 // Thread 1 calls SuspendAllForDebugger, gets preempted after pulsing the mutator lock.
989 // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
990 // Thread 1 fails assertion that all threads are suspended due to thread 2 being in a runnable
991 // state (from SetStateUnsafe).
992 // AssertThreadsAreSuspended(self, self, debug_thread);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700993
Sebastien Hertzed2be172014-08-19 15:33:43 +0200994 VLOG(threads) << *self << " SuspendAllForDebugger complete";
Elliott Hughes01158d72011-09-19 19:47:10 -0700995}

void ThreadList::SuspendSelfForDebugger() {
  Thread* const self = Thread::Current();
  self->SetReadyForDebugInvoke(true);

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  // The debugger may have detached while we were executing an invoke request. In that case, we
  // must not suspend ourselves.
  DebugInvokeReq* pReq = self->GetInvokeReq();
  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
  if (!skip_thread_suspension) {
    // Collisions with other suspends aren't really interesting. We want
    // to ensure that we're the only one fiddling with the suspend count
    // though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    self->ModifySuspendCount(self, +1, nullptr, true);
    CHECK_GT(self->GetSuspendCount(), 0);

    VLOG(threads) << *self << " self-suspending (debugger)";
  } else {
    // We must no longer be subject to debugger suspension.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";

    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
  }

  // If the debugger requested an invoke, we need to send the reply and clear the request.
  if (pReq != nullptr) {
    Dbg::FinishInvokeMethod(pReq);
    self->ClearDebugInvokeReq();
    pReq = nullptr;  // object has been deleted, clear it for safety.
  }

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->GetSuspendCount() != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->GetSuspendCount() != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen when we suspend then resume all threads to
        // update instrumentation or compute monitor info. This can
        // also happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        VLOG(jdwp) << *self << " still suspended after undo "
                   << "(suspend count=" << self->GetSuspendCount() << ", "
                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
      }
    }
    CHECK_EQ(self->GetSuspendCount(), 0);
  }

  self->SetReadyForDebugInvoke(false);
  VLOG(threads) << *self << " self-reviving (debugger)";
}
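
// Commentary (added for clarity): the wait loop above is the classic condition variable pattern
// of re-checking the predicate after every wakeup. A broadcast on resume_cond_ can race with a
// new debugger suspension that pushes the count back above zero, in which case this thread
// simply goes back to waiting; it only returns once the count is observed to be zero while
// holding thread_suspend_count_lock_.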

void ThreadList::ResumeAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " ResumeAllForDebugger starting...";

  // Threads can't resume if we exclusively hold the mutator lock.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      if (debug_suspend_all_count_ > 0) {
        --suspend_all_count_;
        --debug_suspend_all_count_;
      } else {
        // We've been asked to resume all threads without being asked to
        // suspend them all before. That may happen if a debugger tries
        // to resume some suspended threads (with suspend count == 1)
        // at once with a VirtualMachine.Resume command. Let's print a
        // warning.
        LOG(WARNING) << "Debugger attempted to resume all threads without "
                     << "having suspended them all before.";
      }
      // Decrement everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        if (thread->GetDebugSuspendCount() == 0) {
          // This thread may have been individually resumed with ThreadReference.Resume.
          continue;
        }
        VLOG(threads) << "requesting thread resume: " << *thread;
        thread->ModifySuspendCount(self, -1, nullptr, true);
      }
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << *self << " ResumeAllForDebugger complete";
}

void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->GetDebugSuspendCount() == 0) {
        continue;
      }
      thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}

void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  while (true) {
    {
      // No more threads can be born after we start to shut down.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDownLocked());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    MutexLock mu(self, *Locks::thread_list_lock_);
    // Also wait for any threads that are unregistering to finish. This is required so that no
    // threads access the thread list after it is deleted. TODO: This may not work for user daemon
    // threads since they could unregister at the wrong time.
    bool done = unregistering_count_ == 0;
    if (done) {
      for (const auto& thread : list_) {
        if (thread != self && !thread->IsDaemon()) {
          done = false;
          break;
        }
      }
    }
    if (done) {
      break;
    }
    // Wait for another thread to exit before re-checking.
    Locks::thread_exit_cond_->Wait(self);
  }
}

void ThreadList::SuspendAllDaemonThreadsForShutdown() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  size_t daemons_left = 0;
  {
    // Tell all the daemons it's time to suspend.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        thread->ModifySuspendCount(self, +1, nullptr, false);
        ++daemons_left;
      }
      // We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be
      // the sleep forever one.
      thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
    }
  }
  // If we have any daemons left, wait 200ms to ensure they are not stuck in a place where they
  // are about to access runtime state and are not in a runnable state. Examples: Monitor code
  // or waking up from a condition variable. TODO: Try and see if there is a better way to wait
  // for daemon threads to be in a blocked state.
  if (daemons_left > 0) {
    static constexpr size_t kDaemonSleepTime = 200 * 1000;
    usleep(kDaemonSleepTime);
  }
  // Give the threads a chance to suspend, complaining if they're slow.
  bool have_complained = false;
  static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
  static constexpr size_t kSleepMicroseconds = 1000;
  for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
    bool all_suspended = true;
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      for (const auto& thread : list_) {
        if (thread != self && thread->GetState() == kRunnable) {
          if (!have_complained) {
            LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
            have_complained = true;
          }
          all_suspended = false;
        }
      }
    }
    if (all_suspended) {
      return;
    }
    usleep(kSleepMicroseconds);
  }
  LOG(WARNING) << "timed out suspending all daemon threads";
}
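
// Worked timing sketch (derived from the constants above): after the optional 200ms grace
// sleep, the loop polls up to kTimeoutMicroseconds / kSleepMicroseconds = 2000 times, sleeping
// 1ms between polls, for a total budget of roughly 2 seconds before the "timed out" warning.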

void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
  // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While
  // this isn't particularly efficient the suspend counts are most commonly 0 or 1.
  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, true);
  }
  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, false);
  }
  CHECK(!Contains(self));
  list_.push_back(self);
  if (kUseReadBarrier) {
    // Initialize according to the state of the CC collector.
    bool is_gc_marking =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
    self->SetIsGcMarking(is_gc_marking);
    bool weak_ref_access_enabled =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
    self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
  }
}
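
// Worked example (illustrative): if a thread registers while suspend_all_count_ == 2 and
// debug_suspend_all_count_ == 1, the loops above apply one debug-flagged increment and one
// ordinary increment, so the new thread starts with a suspend count of 2 and cannot run until
// both pending suspend-all operations are undone.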

void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ++unregistering_count_;
  }

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
  // causes the threads to join. It is important to do this after incrementing unregistering_count_
  // since we want the runtime to wait for the daemon threads to exit before deleting the thread
  // list.
  self->Destroy();

  // If tracing, remember thread id and name before thread exits.
  Trace::StoreExitingThreadInfo(self);

  uint32_t thin_lock_id = self->GetThreadId();
  while (true) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    MutexLock mu(self, *Locks::thread_list_lock_);
    if (!Contains(self)) {
      std::string thread_name;
      self->GetThreadName(thread_name);
      std::ostringstream os;
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
      break;
    } else {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      if (!self->IsSuspended()) {
        list_.remove(self);
        break;
      }
    }
    // We failed to remove the thread due to a suspend request, loop and try again.
  }
  delete self;

  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
  // temporarily have multiple threads with the same thread id. When this occurs, it causes
  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
  ReleaseThreadId(nullptr, thin_lock_id);

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
#endif

  // Signal that a thread just detached.
  MutexLock mu(nullptr, *Locks::thread_list_lock_);
  --unregistering_count_;
  Locks::thread_exit_cond_->Broadcast(nullptr);
}
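
// Commentary (added for clarity): unregistration is deliberately ordered as
//   1. bump unregistering_count_ so runtime shutdown waits for this thread,
//   2. self->Destroy() while still attached (it can call back into managed code),
//   3. retry removing self from list_ until no suspend request is pending,
//   4. delete self, release the thin lock id, clear TLS, then broadcast thread_exit_cond_.
// Steps 3 and 4 keep a concurrent suspender from observing a freed Thread*.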

void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
  for (const auto& thread : list_) {
    callback(thread, context);
  }
}

void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
  Thread* const self = Thread::Current();
  std::vector<Thread*> threads_to_visit;

  // Tell threads to suspend and copy them into list.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      thread->ModifySuspendCount(self, +1, nullptr, false);
      if (thread == self || thread->IsSuspended()) {
        threads_to_visit.push_back(thread);
      } else {
        thread->ModifySuspendCount(self, -1, nullptr, false);
      }
    }
  }

  // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
  // order violations.
  for (Thread* thread : threads_to_visit) {
    thread->VisitRoots(visitor);
  }

  // Restore suspend counts.
  {
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : threads_to_visit) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }
}
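
// Commentary (added for clarity): the +1 on every thread pins its suspension state. Threads
// that are already suspended (or self) stay pinned and get visited; still-running threads are
// released immediately and skipped. Visiting then happens with neither lock held, respecting
// the lock hierarchy, and the pins are dropped afterwards under thread_suspend_count_lock_.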

void ThreadList::VisitRoots(RootVisitor* visitor) const {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    thread->VisitRoots(visitor);
  }
}

uint32_t ThreadList::AllocThreadId(Thread* self) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  for (size_t i = 0; i < allocated_ids_.size(); ++i) {
    if (!allocated_ids_[i]) {
      allocated_ids_.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  LOG(FATAL) << "Out of internal thread ids";
  return 0;
}

void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  --id;  // Zero is reserved to mean "invalid".
  DCHECK(allocated_ids_[id]) << id;
  allocated_ids_.reset(id);
}
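
// Worked example (illustrative): with an empty bitmap, the first AllocThreadId() sets bit 0 and
// returns 1; the next sets bit 1 and returns 2. ReleaseThreadId(self, 2) clears bit 1, so a
// later allocation may hand out id 2 again. The +1/-1 shift exists purely because id 0 means
// "invalid".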

ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
  Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
}

ScopedSuspendAll::~ScopedSuspendAll() {
  Runtime::Current()->GetThreadList()->ResumeAll();
}
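
// Illustrative usage sketch (not upstream code): callers scope a suspend-all around a short
// critical section and rely on RAII for the resume:
//
//   {
//     ScopedSuspendAll ssa("example cause", /*long_suspend*/ false);
//     // Every other thread is suspended here.
//   }  // ~ScopedSuspendAll resumes them all.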

}  // namespace art