/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#include <dynamic_annotations.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "class_linker.h"
#include "class_loader.h"
#include "cutils/atomic.h"
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "gc_map.h"
#include "heap.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mutex.h"
#include "oat/runtime/context.h"
#include "object.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "sirt_ref.h"
#include "gc/space.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread_list.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {

pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

#if !defined(__APPLE__)
static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}
#endif

void Thread::InitFunctionPointers() {
#if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
#endif
  InitEntryPoints(&entrypoints_);
}

void Thread::SetDebuggerUpdatesEnabled(bool enabled) {
  LOG(INFO) << "Turning debugger updates " << (enabled ? "on" : "off") << " for " << *this;
#if !defined(ART_USE_LLVM_COMPILER)
  ChangeDebuggerEntryPoint(&entrypoints_, enabled);
#else
  UNIMPLEMENTED(FATAL);
#endif
}

void Thread::InitTid() {
  tid_ = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return NULL;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    // after self->Init().
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDown());
    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);
    {
      SirtRef<String> thread_name(self, self->GetThreadName(soa));
      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
    }
    Dbg::PostThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    CHECK(self->peer_ != NULL);
    Object* receiver = soa.Decode<Object*>(self->peer_);
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    AbstractMethod* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
    m->Invoke(self, receiver, NULL, NULL);
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return NULL;
}
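
// The thread-birth handshake: Runtime::StartThreadBirth() (called from CreateNativeThread(),
// below) and EndThreadBirth() bracket Init(), so runtime shutdown cannot begin while a thread is
// only half-attached. The CHECK(!runtime->IsShuttingDown()) above relies on that handshake.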

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, Object* thread_peer) {
  Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != NULL && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<Object*>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  // It's likely that callers are trying to ensure they have at least a certain amount of
  // stack space, so we should add our reserved space on top of what they requested, rather
  // than implicitly take it away from them.
  stack_size += Thread::kStackOverflowReservedBytes;

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}
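
// Worked example (illustrative only, taking kStackOverflowReservedBytes to be 16 KiB and pages
// to be 4 KiB): a request for 64 KiB becomes 64 KiB + 1 MiB + 16 KiB = 1104 KiB, which is
// already page-aligned, so the thread actually gets a 1104 KiB stack.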

static void SigAltStack(stack_t* new_stack, stack_t* old_stack) {
  if (sigaltstack(new_stack, old_stack) == -1) {
    PLOG(FATAL) << "sigaltstack failed";
  }
}

static void SetUpAlternateSignalStack() {
  // Create and set an alternate signal stack.
  stack_t ss;
  ss.ss_sp = new uint8_t[SIGSTKSZ];
  ss.ss_size = SIGSTKSZ;
  ss.ss_flags = 0;
  CHECK(ss.ss_sp != NULL);
  SigAltStack(&ss, NULL);

  // Double-check that it worked.
  ss.ss_sp = NULL;
  SigAltStack(NULL, &ss);
  VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
}
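
// Why an alternate stack: a signal delivered on a thread whose stack is already exhausted (e.g.
// the SIGSEGV produced by a stack overflow) cannot run its handler on that stack; sigaltstack(2)
// gives the handler its own SIGSTKSZ-byte stack so it can still run.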

static void TearDownAlternateSignalStack() {
  // Get the pointer so we can free the memory.
  stack_t ss;
  SigAltStack(NULL, &ss);
  uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);

  // Tell the kernel to stop using it.
  ss.ss_sp = NULL;
  ss.ss_flags = SS_DISABLE;
  ss.ss_size = SIGSTKSZ;  // Avoid ENOMEM failure with Mac OS' buggy libc.
  SigAltStack(&ss, NULL);

  // Free it.
  delete[] allocated_signal_stack;
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != NULL);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;
  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->peer_ = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that vmData is 0, and know that we're not racing to
  // assign it.
  env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_vmData,
                   reinterpret_cast<jint>(child_thread));

  pthread_t new_pthread;
  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  if (pthread_create_result != 0) {
    // pthread_create(3) failed, so clean up.
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      runtime->EndThreadBirth();
    }
    // Manually delete the global reference since Thread::Init will not have been run.
    env->DeleteGlobalRef(child_thread->peer_);
    child_thread->peer_ = NULL;
    delete child_thread;
    child_thread = NULL;
    // TODO: remove from thread group?
    env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_vmData, 0);
    {
      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
      ScopedObjectAccess soa(env);
      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
    }
  }
}
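
// Life cycle in brief: a managed Thread.start() ends up here; the parent registers the birth,
// publishes the child Thread* into Thread.vmData, and the child finishes attaching itself in
// CreateCallback() above before invoking the managed run() method.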

void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == NULL);

  SetUpAlternateSignalStack();
  InitCpu();
  InitFunctionPointers();
  InitCardTable();
  InitTid();

  // Set pthread_self_ before the pthread_setspecific call that makes Thread::Current() work;
  // this way pthread_self_ is never observed as invalid by anything that discovers this thread
  // via Thread::Current().
  pthread_self_ = pthread_self();
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  DCHECK_EQ(Thread::Current(), this);

  thin_lock_id_ = thread_list->AllocThreadId();
  InitStackHwm();

  jni_env_ = new JNIEnvExt(this, java_vm);
  thread_list->Register(this);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group) {
  Thread* self;
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return NULL;
  }
  {
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
      return NULL;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
    }
  }

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  // In the compiler, all threads need this hack, because no-one's going to be getting
  // a native peer!
  if (self->thin_lock_id_ != ThreadList::kMainId && !Runtime::Current()->IsCompiler()) {
    self->CreatePeer(thread_name, as_daemon, thread_group);
  } else {
    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
    if (thread_name != NULL) {
      self->name_->assign(thread_name);
      ::art::SetThreadName(thread_name);
    }
  }

  return self;
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = jni_env_;

  if (thread_group == NULL) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == NULL) {
    CHECK(IsExceptionPending());
    return;
  }
  peer_ = env->NewGlobalRef(peer.get());
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  AssertNoPendingException();

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_vmData,
                        reinterpret_cast<jint>(self));

  ScopedObjectAccess soa(self);
  SirtRef<String> peer_thread_name(soa.Self(), GetThreadName(soa));
  if (peer_thread_name.get() == NULL) {
    Object* native_peer = soa.Decode<Object*>(peer.get());
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
        SetBoolean(native_peer, thread_is_daemon);
    soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
        SetObject(native_peer, soa.Decode<Object*>(thread_group));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
        SetObject(native_peer, soa.Decode<Object*>(thread_name.get()));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
        SetInt(native_peer, thread_priority);
    peer_thread_name.reset(GetThreadName(soa));
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name.get() != NULL) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

void Thread::SetThreadName(const char* name) {
  name_->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

void Thread::InitStackHwm() {
  void* stack_base;
  size_t stack_size;
  GetThreadStack(pthread_self_, stack_base, stack_size);

  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());

  stack_begin_ = reinterpret_cast<byte*>(stack_base);
  stack_size_ = stack_size;

  if (stack_size_ <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
  }

  // TODO: move this into the Linux GetThreadStack implementation.
#if !defined(__APPLE__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      // Find the default stack size for new threads...
      pthread_attr_t default_attributes;
      size_t default_stack_size;
      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
                         "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");

      // ...and use that as our limit.
      size_t old_stack_size = stack_size_;
      stack_size_ = default_stack_size;
      stack_begin_ += (old_stack_size - stack_size_);
      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(stack_size_)
                    << " with base " << reinterpret_cast<void*>(stack_begin_);
    }
  }
#endif

  // Set stack_end_ to the bottom of the stack, reserving space for stack overflow handling.
  ResetDefaultStackEnd();

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
}
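
// Note on the unlimited-stack workaround above: stacks grow downwards, so shrinking the usable
// region from old_stack_size to stack_size_ means moving stack_begin_ (the low address) up by
// the difference; the high end of the stack is unchanged.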

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThinLockId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThinLockId()
       << ",tid=" << GetTid() << ',';
  }
  os << GetState()
     << ",Thread*=" << this
     << ",peer=" << peer_
     << ",\"" << *name_ << "\""
     << "]";
}

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
  Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  Object* native_peer = soa.Decode<Object*>(peer_);
  return (peer_ != NULL) ? reinterpret_cast<String*>(f->GetObject(native_peer)) : NULL;
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*name_);
}

void Thread::AtomicSetFlag(ThreadFlag flag) {
  android_atomic_or(flag, &state_and_flags_.as_int);
}

void Thread::AtomicClearFlag(ThreadFlag flag) {
  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
}

ThreadState Thread::SetState(ThreadState new_state) {
  // Cannot use this code to change into Runnable as changing to Runnable should fail if
  // old_state_and_flags.suspend_request is true.
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(this, Thread::Current());
  union StateAndFlags old_state_and_flags = state_and_flags_;
  state_and_flags_.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}
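
// StateAndFlags (declared in thread.h) packs the thread state and the flag bits into one word so
// both can be read or compare-and-swapped together: as_int views the whole word, while (assuming
// the declared layout) as_struct.flags and as_struct.state are its 16-bit halves. That packing is
// what lets the transitions below atomically refuse to become Runnable while kSuspendRequest is
// set.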

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->DumpLocked(ss);
  LOG(FATAL) << ss.str();
}

void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
      << delta << " " << debug_suspend_count_ << " " << this;
  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
  Locks::thread_suspend_count_lock_->AssertHeld(self);

  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return;
  }

  suspend_count_ += delta;
  if (for_debugger) {
    debug_suspend_count_ += delta;
  }

  if (suspend_count_ == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    AtomicSetFlag(kSuspendRequest);
  }
}
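
// Invariant maintained above: the kSuspendRequest flag is set exactly when suspend_count_ != 0,
// so a thread can poll a single word (state_and_flags_) instead of taking
// thread_suspend_count_lock_ on every state transition.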

bool Thread::RequestCheckpoint(Closure* function) {
  CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
  checkpoint_function_ = function;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  // The target thread must be runnable for the checkpoint request to succeed.
  old_state_and_flags.as_struct.state = kRunnable;
  union StateAndFlags new_state_and_flags = old_state_and_flags;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                         &state_and_flags_.as_int);
  return succeeded == 0;
}
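
// The compare-and-swap above only succeeds if the target's whole state-and-flags word still
// matches the expected value with the state forced to kRunnable; android_atomic_cmpxchg returns
// 0 on success, so RequestCheckpoint() returns true only if the flag was planted on a thread
// that really was runnable at that instant.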

void Thread::FullSuspendCheck() {
  VLOG(threads) << this << " self-suspending";
  // Make thread appear suspended to other threads, release mutator_lock_.
  TransitionFromRunnableToSuspended(kSuspended);
  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
  TransitionFromSuspendedToRunnable();
  VLOG(threads) << this << " self-reviving";
}

void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  do {
    old_state_and_flags = state_and_flags_;
    // Copy over flags and try to clear the checkpoint bit if it is set.
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags & ~kCheckpointRequest;
    new_state_and_flags.as_struct.state = new_state;
  } while (android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                  &state_and_flags_.as_int) != 0);
  // If we toggled the checkpoint flag we must have cleared it.
  uint16_t flag_change = new_state_and_flags.as_struct.flags ^ old_state_and_flags.as_struct.flags;
  if ((flag_change & kCheckpointRequest) != 0) {
    RunCheckpointFunction();
  }
  // Release share on mutator_lock_.
  Locks::mutator_lock_->SharedUnlock(this);
}
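
// The XOR above detects whether this CAS was the one that cleared kCheckpointRequest: the new
// flags always have the bit clear, so the bit differs from the old flags exactly when a
// checkpoint was pending, and in that case this thread owns running the checkpoint function.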

ThreadState Thread::TransitionFromSuspendedToRunnable() {
  bool done = false;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags = state_and_flags_;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
      // Wait while our suspend count is non-zero.
      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
      old_state_and_flags = state_and_flags_;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(this);
        old_state_and_flags = state_and_flags_;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
    // Re-acquire shared mutator_lock_ access.
    Locks::mutator_lock_->SharedLock(this);
    // Atomically change from suspended to runnable if no suspend request pending.
    old_state_and_flags = state_and_flags_;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if ((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0) {
      union StateAndFlags new_state_and_flags = old_state_and_flags;
      new_state_and_flags.as_struct.state = kRunnable;
      done = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                    &state_and_flags_.as_int)
          == 0;
    }
    if (!done) {
      // Failed to transition to Runnable. Release shared mutator_lock_ access and try again.
      Locks::mutator_lock_->SharedUnlock(this);
    }
  } while (!done);
  return static_cast<ThreadState>(old_state);
}
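
// Becoming runnable is deliberately two-phase: wait out any pending suspend request first, then
// take shared mutator_lock_ and CAS into kRunnable only if no new request arrived in between.
// Losing the race simply drops the lock and retries, so a suspender never observes a thread that
// is runnable while its suspend count is non-zero.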

Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout) {
  static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
  useconds_t total_delay_us = 0;
  useconds_t delay_us = 0;
  bool did_suspend_request = false;
  *timeout = false;
  while (true) {
    Thread* thread;
    {
      ScopedObjectAccess soa(Thread::Current());
      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == NULL) {
        LOG(WARNING) << "No such thread for suspend: " << peer;
        return NULL;
      }
      {
        MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
          request_suspension = false;
          did_suspend_request = true;
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's
        // simpler to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, soa.Self()) << "Attempt to suspend for debugger the current thread";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          return thread;
        }
        if (total_delay_us >= kTimeoutUs) {
          LOG(ERROR) << "Thread suspension timed out: " << peer;
          if (did_suspend_request) {
            thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
          }
          *timeout = true;
          return NULL;
        }
      }
      // Release locks and come out of runnable state.
    }
    for (int i = kMaxMutexLevel; i >= 0; --i) {
      BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
      if (held_mutex != NULL) {
        LOG(FATAL) << "Holding " << held_mutex->GetName()
                   << " while sleeping for thread suspension";
      }
    }
    {
      useconds_t new_delay_us = delay_us * 2;
      CHECK_GE(new_delay_us, delay_us);
      if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
        delay_us = new_delay_us;
      }
    }
    if (delay_us == 0) {
      sched_yield();
      // Default to 1 millisecond (note that this gets multiplied by 2 before the first sleep).
      delay_us = 500;
    } else {
      usleep(delay_us);
      total_delay_us += delay_us;
    }
  }
}
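
// The polling loop above backs off exponentially: one sched_yield(), then sleeps of 1ms, 2ms,
// 4ms, ... with each step capped just under 0.5s, giving up with *timeout set once the
// cumulative sleep time reaches the 30s kTimeoutUs budget.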

void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  if (thread != NULL && thread->peer_ != NULL) {
    ScopedObjectAccess soa(self);
    Object* native_peer = soa.Decode<Object*>(thread->peer_);
    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(native_peer);
    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(native_peer);

    Object* thread_group =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(native_peer);

    if (thread_group != NULL) {
      Field* group_name_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
      String* group_name_string = reinterpret_cast<String*>(group_name_field->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    priority = GetNativePriority();
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != NULL) {
    os << '"' << *thread->name_ << '"';
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThinLockId()
       << " " << thread->GetState() << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != NULL) {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    os << "  | group=\"" << group_name << "\""
       << " sCount=" << thread->suspend_count_
       << " dsCount=" << thread->debug_suspend_count_
       << " obj=" << reinterpret_cast<void*>(thread->peer_)
       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
  }

  os << "  | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, tid)
     << " cgrp=" << scheduler_group_name;
  if (thread != NULL) {
    int policy;
    sched_param sp;
    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
    os << " sched=" << policy << "/" << sp.sched_priority
       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
  }
  os << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  char native_thread_state = '?';
  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);

  os << "  | state=" << native_thread_state
     << " schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
  if (thread != NULL) {
    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
  }
}
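
// For reference, the three schedstat fields read above are the kernel's per-task accounting:
// time spent on-CPU (ns), time spent runnable but waiting on a runqueue (ns), and the number of
// timeslices run.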

void Thread::DumpState(std::ostream& os) const {
  Thread::DumpState(os, this, GetTid());
}

struct StackDumpVisitor : public StackVisitor {
  StackDumpVisitor(std::ostream& os, const Thread* thread, Context* context, bool can_allocate)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread->GetManagedStack(), thread->GetTraceStack(), context),
        os(os), thread(thread), can_allocate(can_allocate),
        last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
  }

  virtual ~StackDumpVisitor() {
    if (frame_count == 0) {
      os << "  (no managed stack frames)\n";
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    const int kMaxRepetition = 3;
    Class* c = m->GetDeclaringClass();
    const DexCache* dex_cache = c->GetDexCache();
    int line_number = -1;
    if (dex_cache != NULL) {  // be tolerant of bad input
      const DexFile& dex_file = *dex_cache->GetDexFile();
      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
    }
    if (line_number == last_line_number && last_method == m) {
      repetition_count++;
    } else {
      if (repetition_count >= kMaxRepetition) {
        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
      }
      repetition_count = 0;
      last_line_number = line_number;
      last_method = m;
    }
    if (repetition_count < kMaxRepetition) {
      os << "  at " << PrettyMethod(m, false);
      if (m->IsNative()) {
        os << "(Native method)";
      } else {
        mh.ChangeMethod(m);
        const char* source_file(mh.GetDeclaringClassSourceFile());
        os << "(" << (source_file != NULL ? source_file : "unavailable")
           << ":" << line_number << ")";
      }
      os << "\n";
      if (frame_count == 0) {
        Monitor::DescribeWait(os, thread);
      }
      if (can_allocate) {
        Monitor::DescribeLocks(os, this);
      }
    }

    ++frame_count;
    return true;
  }
  std::ostream& os;
  const Thread* thread;
  bool can_allocate;
  MethodHelper mh;
  AbstractMethod* last_method;
  int last_line_number;
  int repetition_count;
  int frame_count;
};

void Thread::DumpStack(std::ostream& os) const {
  // If we're currently in native code, dump that stack before dumping the managed stack.
  if (GetState() == kNative) {
    DumpKernelStack(os, GetTid(), "  kernel: ", false);
    DumpNativeStack(os, GetTid(), "  native: ", false);
  }
  UniquePtr<Context> context(Context::Create());
  StackDumpVisitor dumper(os, this, context.get(), !throwing_OutOfMemoryError_);
  dumper.WalkStack();
}

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  if (self->thread_exit_check_count_ == 0) {
    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
    self->thread_exit_check_count_ = 1;
  } else {
    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
  }
}

void Thread::Startup() {
  {
    MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);  // Keep GCC happy.
    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
                                         *Locks::thread_suspend_count_lock_);
  }

  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != NULL) {
    LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
  }
}
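
// The TLS key registered above is what makes Thread::Current() work: Init() stores `this` via
// pthread_setspecific, and (presumably, in thread.h) Thread::Current() reads it back with
// pthread_getspecific(pthread_key_self_). ThreadExitCallback doubles as a safety net for native
// threads that exit without calling DetachCurrentThread.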
Carl Shapirob5573532011-07-12 18:22:59 -0700913
Elliott Hughes038a8062011-09-18 14:12:41 -0700914void Thread::FinishStartup() {
Ian Rogers365c1022012-06-22 15:05:28 -0700915 Runtime* runtime = Runtime::Current();
916 CHECK(runtime->IsStarted());
Brian Carlstromb82b6872011-10-26 17:18:07 -0700917
Elliott Hughes01158d72011-09-19 19:47:10 -0700918 // Finish attaching the main thread.
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700919 ScopedObjectAccess soa(Thread::Current());
Ian Rogers365c1022012-06-22 15:05:28 -0700920 Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
Jesse Wilson9a6bae82011-11-14 14:57:30 -0500921
Elliott Hughesaf8d15a2012-05-29 09:12:18 -0700922 Runtime::Current()->GetClassLinker()->RunRootClinits();
Carl Shapirob5573532011-07-12 18:22:59 -0700923}
924
Elliott Hughesc1674ed2011-08-25 18:09:09 -0700925void Thread::Shutdown() {
Elliott Hughes8d768a92011-09-14 16:35:25 -0700926 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
Elliott Hughesc1674ed2011-08-25 18:09:09 -0700927}
928
Ian Rogers52673ff2012-06-27 23:25:34 -0700929Thread::Thread(bool daemon)
Ian Rogers0399dde2012-06-06 17:09:28 -0700930 : suspend_count_(0),
931 card_table_(NULL),
932 exception_(NULL),
933 stack_end_(NULL),
934 managed_stack_(),
935 jni_env_(NULL),
936 self_(NULL),
Elliott Hughes47179f72011-10-27 16:44:39 -0700937 peer_(NULL),
Ian Rogers0399dde2012-06-06 17:09:28 -0700938 stack_begin_(NULL),
939 stack_size_(0),
940 thin_lock_id_(0),
941 tid_(0),
Elliott Hughese62934d2012-04-09 11:24:29 -0700942 wait_mutex_(new Mutex("a thread wait mutex")),
Ian Rogersc604d732012-10-14 16:09:54 -0700943 wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
Elliott Hughes8daa0922011-09-11 13:46:25 -0700944 wait_monitor_(NULL),
945 interrupted_(false),
Elliott Hughesdc33ad52011-09-16 19:46:51 -0700946 wait_next_(NULL),
Elliott Hughes8e4aac52011-09-26 17:03:36 -0700947 monitor_enter_object_(NULL),
Elliott Hughesdcc24742011-09-07 14:02:44 -0700948 top_sirt_(NULL),
Elliott Hughesdc33ad52011-09-16 19:46:51 -0700949 runtime_(NULL),
Elliott Hughes85d15452011-09-16 17:33:01 -0700950 class_loader_override_(NULL),
Elliott Hughes418dfe72011-10-06 18:56:27 -0700951 long_jump_context_(NULL),
Elliott Hughes726079d2011-10-07 18:43:44 -0700952 throwing_OutOfMemoryError_(false),
Ian Rogers0399dde2012-06-06 17:09:28 -0700953 debug_suspend_count_(0),
jeffhaoe343b762011-12-05 16:36:44 -0800954 debug_invoke_req_(new DebugInvokeReq),
Elliott Hughes899e7892012-01-24 14:57:32 -0800955 trace_stack_(new std::vector<TraceStackFrame>),
Ian Rogers0399dde2012-06-06 17:09:28 -0700956 name_(new std::string(kThreadNameDuringStartup)),
Ian Rogers52673ff2012-06-27 23:25:34 -0700957 daemon_(daemon),
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700958 pthread_self_(0),
Ian Rogers52673ff2012-06-27 23:25:34 -0700959 no_thread_suspension_(0),
Elliott Hughes6a607ad2012-07-13 20:40:00 -0700960 last_no_thread_suspension_cause_(NULL),
Mathieu Chartier858f1c52012-10-17 17:45:55 -0700961 checkpoint_function_(0),
Elliott Hughes6a607ad2012-07-13 20:40:00 -0700962 thread_exit_check_count_(0) {
Elliott Hughesf5a7a472011-10-07 14:31:02 -0700963 CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
Ian Rogers30e173f2012-09-26 14:35:03 -0700964 state_and_flags_.as_struct.flags = 0;
965 state_and_flags_.as_struct.state = kNative;
Elliott Hughesffb465f2012-03-01 18:46:05 -0800966 memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
Elliott Hughesdcc24742011-09-07 14:02:44 -0700967}
968
Elliott Hughes7dc51662012-05-16 14:48:43 -0700969bool Thread::IsStillStarting() const {
970 // You might think you can check whether the state is kStarting, but for much of thread startup,
971 // the thread might also be in kVmWait.
972 // You might think you can check whether the peer is NULL, but the peer is actually created and
973 // assigned fairly early on, and needs to be.
974 // It turns out that the last thing to change is the thread name; that's a good proxy for "has
975 // this thread _ever_ entered kRunnable".
976 return (*name_ == kThreadNameDuringStartup);
977}
978
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700979void Thread::AssertNoPendingException() const {
980 if (UNLIKELY(IsExceptionPending())) {
981 ScopedObjectAccess soa(Thread::Current());
982 Throwable* exception = GetException();
983 LOG(FATAL) << "No pending exception expected: " << exception->Dump();
984 }
985}
986
987static void MonitorExitVisitor(const Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
988 Thread* self = reinterpret_cast<Thread*>(arg);
Elliott Hughes02b48d12011-09-07 17:15:51 -0700989 Object* entered_monitor = const_cast<Object*>(object);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700990 if (self->HoldsLock(entered_monitor)) {
991 LOG(WARNING) << "Calling MonitorExit on object "
992 << object << " (" << PrettyTypeOf(object) << ")"
993 << " left locked by native thread "
994 << *Thread::Current() << " which is detaching";
995 entered_monitor->MonitorExit(self);
996 }
Elliott Hughes02b48d12011-09-07 17:15:51 -0700997}
998
Elliott Hughesc0f09332012-03-26 13:27:06 -0700999void Thread::Destroy() {
Ian Rogers120f1c72012-09-28 17:17:10 -07001000 Thread* self = this;
1001 DCHECK_EQ(self, Thread::Current());
Elliott Hughes02b48d12011-09-07 17:15:51 -07001002
Elliott Hughes93e74e82011-09-13 11:07:03 -07001003 if (peer_ != NULL) {
Ian Rogers120f1c72012-09-28 17:17:10 -07001004 // We may need to call user-supplied managed code, do this before final clean-up.
1005 HandleUncaughtExceptions();
1006 RemoveFromThreadGroup();
Elliott Hughes534da072012-03-27 15:17:42 -07001007
Elliott Hughes29f27422011-09-18 16:02:18 -07001008 // this.vmData = 0;
Ian Rogers120f1c72012-09-28 17:17:10 -07001009 jni_env_->SetIntField(peer_, WellKnownClasses::java_lang_Thread_vmData, 0);
Elliott Hughes02b48d12011-09-07 17:15:51 -07001010
Ian Rogers120f1c72012-09-28 17:17:10 -07001011 {
1012 ScopedObjectAccess soa(self);
1013 Dbg::PostThreadDeath(self);
1014 }
Elliott Hughes02b48d12011-09-07 17:15:51 -07001015
Elliott Hughes29f27422011-09-18 16:02:18 -07001016 // Thread.join() is implemented as an Object.wait() on the Thread.lock
1017 // object. Signal anyone who is waiting.
Ian Rogers120f1c72012-09-28 17:17:10 -07001018 ScopedLocalRef<jobject> lock(jni_env_,
1019 jni_env_->GetObjectField(peer_,
1020 WellKnownClasses::java_lang_Thread_lock));
Elliott Hughes038a8062011-09-18 14:12:41 -07001021 // (This conditional is only needed for tests, where Thread.lock won't have been set.)
Ian Rogers120f1c72012-09-28 17:17:10 -07001022 if (lock.get() != NULL) {
1023 jni_env_->MonitorEnter(lock.get());
1024 jni_env_->CallVoidMethod(lock.get(), WellKnownClasses::java_lang_Object_notify);
1025 jni_env_->MonitorExit(lock.get());
Elliott Hughes5f791332011-09-15 17:45:30 -07001026 }
1027 }
Ian Rogers120f1c72012-09-28 17:17:10 -07001028
1029 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1030 if (jni_env_ != NULL) {
1031 jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
1032 }
Elliott Hughesc0f09332012-03-26 13:27:06 -07001033}
Elliott Hughes02b48d12011-09-07 17:15:51 -07001034
Elliott Hughesc0f09332012-03-26 13:27:06 -07001035Thread::~Thread() {
Mathieu Chartierdbe6f462012-09-25 16:54:50 -07001036 if (jni_env_ != NULL && peer_ != NULL) {
 1037 // If pthread_create failed, we don't have a JNI env here.
1038 jni_env_->DeleteGlobalRef(peer_);
1039 }
1040 peer_ = NULL;
1041
Elliott Hughesc1674ed2011-08-25 18:09:09 -07001042 delete jni_env_;
Elliott Hughes02b48d12011-09-07 17:15:51 -07001043 jni_env_ = NULL;
1044
Mathieu Chartierdbe6f462012-09-25 16:54:50 -07001045 CHECK_NE(GetState(), kRunnable);
 1046 // We may be deleting a stillborn thread.
1047 SetStateUnsafe(kTerminated);
Elliott Hughes85d15452011-09-16 17:33:01 -07001048
1049 delete wait_cond_;
1050 delete wait_mutex_;
1051
Ian Rogers776ac1f2012-04-13 23:36:36 -07001052#if !defined(ART_USE_LLVM_COMPILER)
Elliott Hughes85d15452011-09-16 17:33:01 -07001053 delete long_jump_context_;
Ian Rogers776ac1f2012-04-13 23:36:36 -07001054#endif
Elliott Hughes475fc232011-10-25 15:00:35 -07001055
1056 delete debug_invoke_req_;
jeffhaoe343b762011-12-05 16:36:44 -08001057 delete trace_stack_;
Elliott Hughes899e7892012-01-24 14:57:32 -08001058 delete name_;
Elliott Hughesd8af1592012-04-16 20:40:15 -07001059
1060 TearDownAlternateSignalStack();
Elliott Hughesc1674ed2011-08-25 18:09:09 -07001061}
1062
Ian Rogers120f1c72012-09-28 17:17:10 -07001063void Thread::HandleUncaughtExceptions() {
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001064 if (!IsExceptionPending()) {
1065 return;
1066 }
Ian Rogers120f1c72012-09-28 17:17:10 -07001067
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001068 // Get and clear the exception.
Ian Rogers120f1c72012-09-28 17:17:10 -07001069 ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
1070 jni_env_->ExceptionClear();
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001071
1072 // If the thread has its own handler, use that.
Ian Rogers120f1c72012-09-28 17:17:10 -07001073 ScopedLocalRef<jobject> handler(jni_env_,
1074 jni_env_->GetObjectField(peer_,
1075 WellKnownClasses::java_lang_Thread_uncaughtHandler));
1076 if (handler.get() == NULL) {
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001077 // Otherwise use the thread group's default handler.
Ian Rogers120f1c72012-09-28 17:17:10 -07001078 handler.reset(jni_env_->GetObjectField(peer_, WellKnownClasses::java_lang_Thread_group));
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001079 }
1080
1081 // Call the handler.
Ian Rogers120f1c72012-09-28 17:17:10 -07001082 jni_env_->CallVoidMethod(handler.get(),
1083 WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
1084 peer_, exception.get());
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001085
1086 // If the handler threw, clear that exception too.
Ian Rogers120f1c72012-09-28 17:17:10 -07001087 jni_env_->ExceptionClear();
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001088}
1089
Ian Rogers120f1c72012-09-28 17:17:10 -07001090void Thread::RemoveFromThreadGroup() {
Brian Carlstrom4514d3c2011-10-21 17:01:31 -07001091 // this.group.removeThread(this);
1092 // group can be null if we're in the compiler or a test.
Ian Rogers120f1c72012-09-28 17:17:10 -07001093 ScopedLocalRef<jobject> group(jni_env_,
1094 jni_env_->GetObjectField(peer_,
1095 WellKnownClasses::java_lang_Thread_group));
1096 if (group.get() != NULL) {
1097 jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
1098 peer_);
Brian Carlstrom4514d3c2011-10-21 17:01:31 -07001099 }
1100}
1101
Ian Rogers408f79a2011-08-23 18:22:33 -07001102size_t Thread::NumSirtReferences() {
Ian Rogersa8cd9f42011-08-19 16:43:41 -07001103 size_t count = 0;
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001104 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
Ian Rogersa8cd9f42011-08-19 16:43:41 -07001105 count += cur->NumberOfReferences();
1106 }
1107 return count;
1108}
1109
Ian Rogers408f79a2011-08-23 18:22:33 -07001110bool Thread::SirtContains(jobject obj) {
1111 Object** sirt_entry = reinterpret_cast<Object**>(obj);
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001112 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1113 if (cur->Contains(sirt_entry)) {
Ian Rogersa8cd9f42011-08-19 16:43:41 -07001114 return true;
1115 }
1116 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001117 // JNI code invoked from portable code uses shadow frames rather than the SIRT.
1118 return managed_stack_.ShadowFramesContain(sirt_entry);
TDYa12728f1a142012-03-15 21:51:52 -07001119}
1120
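// Visits every non-null reference held in this thread's stack indirect reference tables.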
Shih-wei Liao8dfc9d52011-09-28 18:06:15 -07001121void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) {
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001122 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
Shih-wei Liao8dfc9d52011-09-28 18:06:15 -07001123 size_t num_refs = cur->NumberOfReferences();
1124 for (size_t j = 0; j < num_refs; j++) {
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001125 Object* object = cur->GetReference(j);
Brian Carlstrom5e73f9c2011-10-11 11:28:12 -07001126 if (object != NULL) {
1127 visitor(object, arg);
1128 }
Shih-wei Liao8dfc9d52011-09-28 18:06:15 -07001129 }
1130 }
1131}
1132
Ian Rogers408f79a2011-08-23 18:22:33 -07001133Object* Thread::DecodeJObject(jobject obj) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001134 Locks::mutator_lock_->AssertSharedHeld(this);
Ian Rogers408f79a2011-08-23 18:22:33 -07001135 if (obj == NULL) {
1136 return NULL;
1137 }
1138 IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1139 IndirectRefKind kind = GetIndirectRefKind(ref);
1140 Object* result;
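  // Dispatch on the reference kind: local and (weak) global references are looked up in the JNI
  // indirect reference tables; anything else is treated as a SIRT entry or, for apps with known
  // JNI bugs, as a raw Object pointer.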
1141 switch (kind) {
1142 case kLocal:
1143 {
Elliott Hughes69f5bc62011-08-24 09:26:14 -07001144 IndirectReferenceTable& locals = jni_env_->locals;
Elliott Hughescf4c6c42011-09-01 15:16:42 -07001145 result = const_cast<Object*>(locals.Get(ref));
Ian Rogers408f79a2011-08-23 18:22:33 -07001146 break;
1147 }
1148 case kGlobal:
1149 {
1150 JavaVMExt* vm = Runtime::Current()->GetJavaVM();
1151 IndirectReferenceTable& globals = vm->globals;
Ian Rogers81d425b2012-09-27 16:03:43 -07001152 MutexLock mu(this, vm->globals_lock);
Elliott Hughescf4c6c42011-09-01 15:16:42 -07001153 result = const_cast<Object*>(globals.Get(ref));
Ian Rogers408f79a2011-08-23 18:22:33 -07001154 break;
1155 }
1156 case kWeakGlobal:
1157 {
1158 JavaVMExt* vm = Runtime::Current()->GetJavaVM();
1159 IndirectReferenceTable& weak_globals = vm->weak_globals;
Ian Rogers81d425b2012-09-27 16:03:43 -07001160 MutexLock mu(this, vm->weak_globals_lock);
Elliott Hughescf4c6c42011-09-01 15:16:42 -07001161 result = const_cast<Object*>(weak_globals.Get(ref));
Ian Rogers408f79a2011-08-23 18:22:33 -07001162 if (result == kClearedJniWeakGlobal) {
1163 // This is a special case where it's okay to return NULL.
1164 return NULL;
1165 }
1166 break;
1167 }
1168 case kSirtOrInvalid:
1169 default:
1170 // TODO: make stack indirect reference table lookup more efficient
1171 // Check if this is a local reference in the SIRT
Ian Rogers0399dde2012-06-06 17:09:28 -07001172 if (SirtContains(obj)) {
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001173 result = *reinterpret_cast<Object**>(obj); // Read from SIRT
Elliott Hughesc2dc62d2012-01-17 20:06:12 -08001174 } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
Ian Rogers408f79a2011-08-23 18:22:33 -07001175 // Assume an invalid local reference is actually a direct pointer.
1176 result = reinterpret_cast<Object*>(obj);
1177 } else {
Elliott Hughesa2501992011-08-26 19:39:54 -07001178 result = kInvalidIndirectRefObject;
Ian Rogers408f79a2011-08-23 18:22:33 -07001179 }
1180 }
1181
1182 if (result == NULL) {
Elliott Hughes3f6635a2012-06-19 13:37:49 -07001183 JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
Elliott Hughesa2501992011-08-26 19:39:54 -07001184 } else {
1185 if (result != kInvalidIndirectRefObject) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -08001186 Runtime::Current()->GetHeap()->VerifyObject(result);
Elliott Hughesa2501992011-08-26 19:39:54 -07001187 }
Ian Rogers408f79a2011-08-23 18:22:33 -07001188 }
Ian Rogers408f79a2011-08-23 18:22:33 -07001189 return result;
1190}
1191
Ian Rogers81d425b2012-09-27 16:03:43 -07001192// Implements java.lang.Thread.interrupted.
1193bool Thread::Interrupted() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001194 MutexLock mu(Thread::Current(), *wait_mutex_);
Ian Rogers81d425b2012-09-27 16:03:43 -07001195 bool interrupted = interrupted_;
1196 interrupted_ = false;
1197 return interrupted;
1198}
1199
1200// Implements java.lang.Thread.isInterrupted.
1201bool Thread::IsInterrupted() {
Ian Rogers50b35e22012-10-04 10:09:15 -07001202 MutexLock mu(Thread::Current(), *wait_mutex_);
Ian Rogers81d425b2012-09-27 16:03:43 -07001203 return interrupted_;
1204}
1205
1206void Thread::Interrupt() {
Ian Rogersc604d732012-10-14 16:09:54 -07001207 Thread* self = Thread::Current();
1208 MutexLock mu(self, *wait_mutex_);
Ian Rogers81d425b2012-09-27 16:03:43 -07001209 if (interrupted_) {
1210 return;
1211 }
1212 interrupted_ = true;
Ian Rogersc604d732012-10-14 16:09:54 -07001213 NotifyLocked(self);
Ian Rogers81d425b2012-09-27 16:03:43 -07001214}
1215
1216void Thread::Notify() {
Ian Rogersc604d732012-10-14 16:09:54 -07001217 Thread* self = Thread::Current();
1218 MutexLock mu(self, *wait_mutex_);
1219 NotifyLocked(self);
Ian Rogers81d425b2012-09-27 16:03:43 -07001220}
1221
Ian Rogersc604d732012-10-14 16:09:54 -07001222void Thread::NotifyLocked(Thread* self) {
Ian Rogers81d425b2012-09-27 16:03:43 -07001223 if (wait_monitor_ != NULL) {
Ian Rogersc604d732012-10-14 16:09:54 -07001224 wait_cond_->Signal(self);
Ian Rogers81d425b2012-09-27 16:03:43 -07001225 }
1226}
1227
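// Counts the frames that belong in a stack trace, skipping frames up to and including the
// exception constructor as well as runtime (callee save) frames.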
Ian Rogers0399dde2012-06-06 17:09:28 -07001228class CountStackDepthVisitor : public StackVisitor {
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001229 public:
Ian Rogers0399dde2012-06-06 17:09:28 -07001230 CountStackDepthVisitor(const ManagedStack* stack,
Ian Rogersca190662012-06-26 15:45:57 -07001231 const std::vector<TraceStackFrame>* trace_stack)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001232 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughes08fc03a2012-06-26 17:34:00 -07001233 : StackVisitor(stack, trace_stack, NULL),
1234 depth_(0), skip_depth_(0), skipping_(true) {}
Elliott Hughesd369bb72011-09-12 14:41:14 -07001235
Ian Rogersb726dcb2012-09-05 08:57:23 -07001236 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Elliott Hughes29f27422011-09-18 16:02:18 -07001237 // We want to skip frames up to and including the exception's constructor.
Ian Rogers90865722011-09-19 11:11:44 -07001238 // Note that we also skip the frame if it doesn't have a method (namely the callee
 1239 // save frame).
Mathieu Chartier66f19252012-09-18 08:57:04 -07001240 AbstractMethod* m = GetMethod();
Ian Rogers0399dde2012-06-06 17:09:28 -07001241 if (skipping_ && !m->IsRuntimeMethod() &&
1242 !Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
Elliott Hughes29f27422011-09-18 16:02:18 -07001243 skipping_ = false;
1244 }
1245 if (!skipping_) {
Ian Rogers0399dde2012-06-06 17:09:28 -07001246 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save).
Ian Rogers6b0870d2011-12-15 19:38:12 -08001247 ++depth_;
1248 }
Elliott Hughes29f27422011-09-18 16:02:18 -07001249 } else {
1250 ++skip_depth_;
1251 }
Elliott Hughes530fa002012-03-12 11:44:49 -07001252 return true;
Shih-wei Liao55df06b2011-08-26 14:39:27 -07001253 }
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001254
1255 int GetDepth() const {
Ian Rogersaaa20802011-09-11 21:47:37 -07001256 return depth_;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001257 }
1258
Elliott Hughes29f27422011-09-18 16:02:18 -07001259 int GetSkipDepth() const {
1260 return skip_depth_;
1261 }
1262
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001263 private:
Ian Rogersaaa20802011-09-11 21:47:37 -07001264 uint32_t depth_;
Elliott Hughes29f27422011-09-18 16:02:18 -07001265 uint32_t skip_depth_;
1266 bool skipping_;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001267};
1268
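// Builds the internal stack trace: an ObjectArray of the methods on the stack, with an IntArray
// of the corresponding dex PCs stored in the extra last slot.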
Ian Rogers0399dde2012-06-06 17:09:28 -07001269class BuildInternalStackTraceVisitor : public StackVisitor {
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001270 public:
Ian Rogers1f539342012-10-03 21:09:42 -07001271 explicit BuildInternalStackTraceVisitor(Thread* self, const ManagedStack* stack,
Ian Rogers0399dde2012-06-06 17:09:28 -07001272 const std::vector<TraceStackFrame>* trace_stack,
Ian Rogersca190662012-06-26 15:45:57 -07001273 int skip_depth)
Ian Rogers1f539342012-10-03 21:09:42 -07001274 : StackVisitor(stack, trace_stack, NULL), self_(self),
Elliott Hughes08fc03a2012-06-26 17:34:00 -07001275 skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}
Ian Rogers283ed0d2012-02-16 15:25:09 -08001276
Ian Rogers1f539342012-10-03 21:09:42 -07001277 bool Init(int depth)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001278 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogersaaa20802011-09-11 21:47:37 -07001279 // Allocate method trace with an extra slot that will hold the PC trace
Ian Rogers0399dde2012-06-06 17:09:28 -07001280 SirtRef<ObjectArray<Object> >
Ian Rogers1f539342012-10-03 21:09:42 -07001281 method_trace(self_,
Ian Rogers50b35e22012-10-04 10:09:15 -07001282 Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(self_,
1283 depth + 1));
Ian Rogers0399dde2012-06-06 17:09:28 -07001284 if (method_trace.get() == NULL) {
Ian Rogers283ed0d2012-02-16 15:25:09 -08001285 return false;
Elliott Hughes726079d2011-10-07 18:43:44 -07001286 }
Ian Rogers50b35e22012-10-04 10:09:15 -07001287 IntArray* dex_pc_trace = IntArray::Alloc(self_, depth);
Ian Rogers0399dde2012-06-06 17:09:28 -07001288 if (dex_pc_trace == NULL) {
Ian Rogers283ed0d2012-02-16 15:25:09 -08001289 return false;
Elliott Hughes726079d2011-10-07 18:43:44 -07001290 }
Ian Rogersaaa20802011-09-11 21:47:37 -07001291 // Save the PC trace in the last element of the method trace; this also places it into the
 1292 // object graph.
Ian Rogers0399dde2012-06-06 17:09:28 -07001293 method_trace->Set(depth, dex_pc_trace);
1294 // Set the Object*s and assert that no thread suspension is now possible.
Ian Rogers52673ff2012-06-27 23:25:34 -07001295 const char* last_no_suspend_cause =
Ian Rogers1f539342012-10-03 21:09:42 -07001296 self_->StartAssertNoThreadSuspension("Building internal stack trace");
Ian Rogers52673ff2012-06-27 23:25:34 -07001297 CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
Ian Rogers0399dde2012-06-06 17:09:28 -07001298 method_trace_ = method_trace.get();
1299 dex_pc_trace_ = dex_pc_trace;
Ian Rogers283ed0d2012-02-16 15:25:09 -08001300 return true;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001301 }
1302
Ian Rogers0399dde2012-06-06 17:09:28 -07001303 virtual ~BuildInternalStackTraceVisitor() {
Ian Rogers52673ff2012-06-27 23:25:34 -07001304 if (method_trace_ != NULL) {
Ian Rogers1f539342012-10-03 21:09:42 -07001305 self_->EndAssertNoThreadSuspension(NULL);
Ian Rogers52673ff2012-06-27 23:25:34 -07001306 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001307 }
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001308
Ian Rogersb726dcb2012-09-05 08:57:23 -07001309 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Ian Rogers0399dde2012-06-06 17:09:28 -07001310 if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
Elliott Hughes530fa002012-03-12 11:44:49 -07001311 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
Elliott Hughes726079d2011-10-07 18:43:44 -07001312 }
Elliott Hughes29f27422011-09-18 16:02:18 -07001313 if (skip_depth_ > 0) {
1314 skip_depth_--;
Elliott Hughes530fa002012-03-12 11:44:49 -07001315 return true;
Elliott Hughes29f27422011-09-18 16:02:18 -07001316 }
Mathieu Chartier66f19252012-09-18 08:57:04 -07001317 AbstractMethod* m = GetMethod();
Ian Rogers0399dde2012-06-06 17:09:28 -07001318 if (m->IsRuntimeMethod()) {
1319 return true; // Ignore runtime frames (in particular callee save).
Ian Rogers6b0870d2011-12-15 19:38:12 -08001320 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001321 method_trace_->Set(count_, m);
1322 dex_pc_trace_->Set(count_, GetDexPc());
Ian Rogersaaa20802011-09-11 21:47:37 -07001323 ++count_;
Elliott Hughes530fa002012-03-12 11:44:49 -07001324 return true;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001325 }
1326
Ian Rogers0399dde2012-06-06 17:09:28 -07001327 ObjectArray<Object>* GetInternalStackTrace() const {
1328 return method_trace_;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001329 }
1330
1331 private:
Ian Rogers1f539342012-10-03 21:09:42 -07001332 Thread* const self_;
Elliott Hughes29f27422011-09-18 16:02:18 -07001333 // How many more frames to skip.
1334 int32_t skip_depth_;
Ian Rogers0399dde2012-06-06 17:09:28 -07001335 // Current position down stack trace.
Ian Rogersaaa20802011-09-11 21:47:37 -07001336 uint32_t count_;
Ian Rogers0399dde2012-06-06 17:09:28 -07001337 // Array of dex PC values.
1338 IntArray* dex_pc_trace_;
1339 // An array of the methods on the stack, the last entry is a reference to the PC trace.
Ian Rogersaaa20802011-09-11 21:47:37 -07001340 ObjectArray<Object>* method_trace_;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001341};
1342
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001343jobject Thread::CreateInternalStackTrace(const ScopedObjectAccess& soa) const {
Ian Rogersaaa20802011-09-11 21:47:37 -07001344 // Compute depth of stack
Ian Rogers0399dde2012-06-06 17:09:28 -07001345 CountStackDepthVisitor count_visitor(GetManagedStack(), GetTraceStack());
1346 count_visitor.WalkStack();
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001347 int32_t depth = count_visitor.GetDepth();
Elliott Hughes29f27422011-09-18 16:02:18 -07001348 int32_t skip_depth = count_visitor.GetSkipDepth();
Shih-wei Liao44175362011-08-28 16:59:17 -07001349
Ian Rogers1f539342012-10-03 21:09:42 -07001350 // Build internal stack trace.
1351 BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), GetManagedStack(), GetTraceStack(),
Ian Rogers0399dde2012-06-06 17:09:28 -07001352 skip_depth);
Ian Rogers1f539342012-10-03 21:09:42 -07001353 if (!build_trace_visitor.Init(depth)) {
1354 return NULL; // Allocation failed.
Ian Rogers283ed0d2012-02-16 15:25:09 -08001355 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001356 build_trace_visitor.WalkStack();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001357 return soa.AddLocalReference<jobjectArray>(build_trace_visitor.GetInternalStackTrace());
Ian Rogersaaa20802011-09-11 21:47:37 -07001358}
1359
Elliott Hughes01158d72011-09-19 19:47:10 -07001360jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
1361 jobjectArray output_array, int* stack_depth) {
Ian Rogersaaa20802011-09-11 21:47:37 -07001362 // Transition into runnable state to work on Object*/Array*
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001363 ScopedObjectAccess soa(env);
Ian Rogersaaa20802011-09-11 21:47:37 -07001364 // Decode the internal stack trace into the depth, method trace and PC trace
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001365 ObjectArray<Object>* method_trace = soa.Decode<ObjectArray<Object>*>(internal);
Ian Rogers9074b992011-10-26 17:41:55 -07001366 int32_t depth = method_trace->GetLength() - 1;
Ian Rogersaaa20802011-09-11 21:47:37 -07001367 IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
1368
1369 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1370
Elliott Hughes01158d72011-09-19 19:47:10 -07001371 jobjectArray result;
1372 ObjectArray<StackTraceElement>* java_traces;
1373 if (output_array != NULL) {
1374 // Reuse the array we were given.
1375 result = output_array;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001376 java_traces = soa.Decode<ObjectArray<StackTraceElement>*>(output_array);
Elliott Hughes01158d72011-09-19 19:47:10 -07001377 // ...adjusting the number of frames we'll write to not exceed the array length.
1378 depth = std::min(depth, java_traces->GetLength());
1379 } else {
1380 // Create java_trace array and place in local reference table
Ian Rogers50b35e22012-10-04 10:09:15 -07001381 java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
Elliott Hughes30646832011-10-13 16:59:46 -07001382 if (java_traces == NULL) {
1383 return NULL;
1384 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001385 result = soa.AddLocalReference<jobjectArray>(java_traces);
Elliott Hughes01158d72011-09-19 19:47:10 -07001386 }
1387
1388 if (stack_depth != NULL) {
1389 *stack_depth = depth;
1390 }
Shih-wei Liao55df06b2011-08-26 14:39:27 -07001391
Ian Rogers6d4d9fc2011-11-30 16:24:48 -08001392 MethodHelper mh;
Shih-wei Liao9b576b42011-08-29 01:45:07 -07001393 for (int32_t i = 0; i < depth; ++i) {
Ian Rogersaaa20802011-09-11 21:47:37 -07001394 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
Mathieu Chartier66f19252012-09-18 08:57:04 -07001395 AbstractMethod* method = down_cast<AbstractMethod*>(method_trace->Get(i));
Ian Rogers6d4d9fc2011-11-30 16:24:48 -08001396 mh.ChangeMethod(method);
Ian Rogers0399dde2012-06-06 17:09:28 -07001397 uint32_t dex_pc = pc_trace->Get(i);
1398 int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
Ian Rogersaaa20802011-09-11 21:47:37 -07001399 // Allocate element, potentially triggering GC
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001400 // TODO: reuse class_name_object via Class::name_?
Ian Rogers48601312011-12-07 16:45:19 -08001401 const char* descriptor = mh.GetDeclaringClassDescriptor();
1402 CHECK(descriptor != NULL);
1403 std::string class_name(PrettyDescriptor(descriptor));
Ian Rogers1f539342012-10-03 21:09:42 -07001404 SirtRef<String> class_name_object(soa.Self(),
Ian Rogers50b35e22012-10-04 10:09:15 -07001405 String::AllocFromModifiedUtf8(soa.Self(),
1406 class_name.c_str()));
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001407 if (class_name_object.get() == NULL) {
1408 return NULL;
1409 }
Ian Rogers48601312011-12-07 16:45:19 -08001410 const char* method_name = mh.GetName();
1411 CHECK(method_name != NULL);
Ian Rogers50b35e22012-10-04 10:09:15 -07001412 SirtRef<String> method_name_object(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(),
1413 method_name));
Ian Rogers6d4d9fc2011-11-30 16:24:48 -08001414 if (method_name_object.get() == NULL) {
1415 return NULL;
1416 }
Ian Rogers48601312011-12-07 16:45:19 -08001417 const char* source_file = mh.GetDeclaringClassSourceFile();
Ian Rogers50b35e22012-10-04 10:09:15 -07001418 SirtRef<String> source_name_object(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(),
1419 source_file));
1420 StackTraceElement* obj = StackTraceElement::Alloc(soa.Self(),
1421 class_name_object.get(),
Ian Rogers6d4d9fc2011-11-30 16:24:48 -08001422 method_name_object.get(),
1423 source_name_object.get(),
Brian Carlstrom40381fb2011-10-19 14:13:40 -07001424 line_number);
Elliott Hughes30646832011-10-13 16:59:46 -07001425 if (obj == NULL) {
1426 return NULL;
1427 }
Ian Rogersaaa20802011-09-11 21:47:37 -07001428#ifdef MOVING_GARBAGE_COLLECTOR
1429 // Re-read after potential GC
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001430 java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
1431 method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
Ian Rogersaaa20802011-09-11 21:47:37 -07001432 pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
1433#endif
Shih-wei Liao55df06b2011-08-26 14:39:27 -07001434 java_traces->Set(i, obj);
1435 }
Ian Rogersaaa20802011-09-11 21:47:37 -07001436 return result;
Shih-wei Liao55df06b2011-08-26 14:39:27 -07001437}
1438
Elliott Hughes5cb5ad22011-10-02 12:13:39 -07001439void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
Elliott Hughesa5b897e2011-08-16 11:33:06 -07001440 va_list args;
1441 va_start(args, fmt);
Elliott Hughes4a2b4172011-09-20 17:08:25 -07001442 ThrowNewExceptionV(exception_class_descriptor, fmt, args);
Elliott Hughesa5b897e2011-08-16 11:33:06 -07001443 va_end(args);
Elliott Hughes4a2b4172011-09-20 17:08:25 -07001444}
1445
1446void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) {
1447 std::string msg;
1448 StringAppendV(&msg, fmt, ap);
Elliott Hughes5cb5ad22011-10-02 12:13:39 -07001449 ThrowNewException(exception_class_descriptor, msg.c_str());
1450}
Elliott Hughes37f7a402011-08-22 18:56:01 -07001451
Elliott Hughes5cb5ad22011-10-02 12:13:39 -07001452void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001453 AssertNoPendingException(); // Callers should either clear or call ThrowNewWrappedException.
Elliott Hughesa4f94742012-05-29 16:28:38 -07001454 ThrowNewWrappedException(exception_class_descriptor, msg);
1455}
1456
1457void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) {
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001458 // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception".
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001459 CHECK_EQ('L', exception_class_descriptor[0]);
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001460 std::string descriptor(exception_class_descriptor + 1);
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001461 CHECK_EQ(';', descriptor[descriptor.length() - 1]);
Elliott Hughese5b0dc82011-08-23 09:59:02 -07001462 descriptor.erase(descriptor.length() - 1);
1463
1464 JNIEnv* env = GetJniEnv();
Elliott Hughesa4f94742012-05-29 16:28:38 -07001465 jobject cause = env->ExceptionOccurred();
1466 env->ExceptionClear();
1467
Elliott Hughes726079d2011-10-07 18:43:44 -07001468 ScopedLocalRef<jclass> exception_class(env, env->FindClass(descriptor.c_str()));
Elliott Hughes30646832011-10-13 16:59:46 -07001469 if (exception_class.get() == NULL) {
1470 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI FindClass failed: "
1471 << PrettyTypeOf(GetException());
1472 CHECK(IsExceptionPending());
1473 return;
1474 }
Brian Carlstromebd1fd22011-12-07 15:46:26 -08001475 if (!Runtime::Current()->IsStarted()) {
1476 // Something is trying to throw an exception without a started
1477 // runtime, which is the common case in the compiler. We won't be
1478 // able to invoke the constructor of the exception, so use
1479 // AllocObject which will not invoke a constructor.
1480 ScopedLocalRef<jthrowable> exception(
1481 env, reinterpret_cast<jthrowable>(env->AllocObject(exception_class.get())));
1482 if (exception.get() != NULL) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001483 ScopedObjectAccessUnchecked soa(env);
1484 Throwable* t = reinterpret_cast<Throwable*>(soa.Self()->DecodeJObject(exception.get()));
Ian Rogers50b35e22012-10-04 10:09:15 -07001485 t->SetDetailMessage(String::AllocFromModifiedUtf8(soa.Self(), msg));
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001486 soa.Self()->SetException(t);
Brian Carlstromebd1fd22011-12-07 15:46:26 -08001487 } else {
1488 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI AllocObject failed: "
1489 << PrettyTypeOf(GetException());
1490 CHECK(IsExceptionPending());
1491 }
1492 return;
1493 }
Elliott Hughesa4f94742012-05-29 16:28:38 -07001494 int rc = ::art::ThrowNewException(env, exception_class.get(), msg, cause);
Elliott Hughes30646832011-10-13 16:59:46 -07001495 if (rc != JNI_OK) {
1496 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI ThrowNew failed: "
1497 << PrettyTypeOf(GetException());
1498 CHECK(IsExceptionPending());
Elliott Hughes30646832011-10-13 16:59:46 -07001499 }
Elliott Hughesa5b897e2011-08-16 11:33:06 -07001500}
1501
Elliott Hughes2ced6a52011-10-16 18:44:48 -07001502void Thread::ThrowOutOfMemoryError(const char* msg) {
1503 LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1504 msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
Elliott Hughes726079d2011-10-07 18:43:44 -07001505 if (!throwing_OutOfMemoryError_) {
1506 throwing_OutOfMemoryError_ = true;
Elliott Hughes57aba862012-06-20 14:00:47 -07001507 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
Elliott Hughes418dfe72011-10-06 18:56:27 -07001508 } else {
Elliott Hughes225f5a12012-06-11 11:23:48 -07001509 Dump(LOG(ERROR)); // The pre-allocated OOME has no stack, so help out and log one.
1510 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
Elliott Hughes418dfe72011-10-06 18:56:27 -07001511 }
Elliott Hughes726079d2011-10-07 18:43:44 -07001512 throwing_OutOfMemoryError_ = false;
Elliott Hughes79082e32011-08-25 12:07:32 -07001513}
1514
Elliott Hughes498508c2011-10-17 14:58:22 -07001515Thread* Thread::CurrentFromGdb() {
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001516 return Thread::Current();
1517}
1518
1519void Thread::DumpFromGdb() const {
Brian Carlstrom6b4ef022011-10-23 14:59:04 -07001520 std::ostringstream ss;
1521 Dump(ss);
Elliott Hughes95572412011-12-13 18:14:20 -08001522 std::string str(ss.str());
Brian Carlstrom6b4ef022011-10-23 14:59:04 -07001523 // log to stderr for debugging command line processes
1524 std::cerr << str;
1525#ifdef HAVE_ANDROID_OS
1526 // log to logcat for debugging frameworks processes
1527 LOG(INFO) << str;
1528#endif
Elliott Hughesaccd83d2011-10-17 14:25:58 -07001529}
1530
Elliott Hughes98e20172012-04-24 15:38:13 -07001531struct EntryPointInfo {
1532 uint32_t offset;
1533 const char* name;
1534};
1535#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
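// Maps each entry point's offset within Thread to a printable name; used by DumpThreadOffset below.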
1536static const EntryPointInfo gThreadEntryPointInfo[] = {
1537 ENTRY_POINT_INFO(pAllocArrayFromCode),
1538 ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
1539 ENTRY_POINT_INFO(pAllocObjectFromCode),
1540 ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
1541 ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
1542 ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
1543 ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
1544 ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
1545 ENTRY_POINT_INFO(pCheckCastFromCode),
1546 ENTRY_POINT_INFO(pDebugMe),
1547 ENTRY_POINT_INFO(pUpdateDebuggerFromCode),
1548 ENTRY_POINT_INFO(pInitializeStaticStorage),
1549 ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
1550 ENTRY_POINT_INFO(pInitializeTypeFromCode),
1551 ENTRY_POINT_INFO(pResolveStringFromCode),
Ian Rogers474b6da2012-09-25 00:20:38 -07001552 ENTRY_POINT_INFO(pGetAndClearException),
Elliott Hughes98e20172012-04-24 15:38:13 -07001553 ENTRY_POINT_INFO(pSet32Instance),
1554 ENTRY_POINT_INFO(pSet32Static),
1555 ENTRY_POINT_INFO(pSet64Instance),
1556 ENTRY_POINT_INFO(pSet64Static),
1557 ENTRY_POINT_INFO(pSetObjInstance),
1558 ENTRY_POINT_INFO(pSetObjStatic),
1559 ENTRY_POINT_INFO(pGet32Instance),
1560 ENTRY_POINT_INFO(pGet32Static),
1561 ENTRY_POINT_INFO(pGet64Instance),
1562 ENTRY_POINT_INFO(pGet64Static),
1563 ENTRY_POINT_INFO(pGetObjInstance),
1564 ENTRY_POINT_INFO(pGetObjStatic),
1565 ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
Elliott Hughes98e20172012-04-24 15:38:13 -07001566 ENTRY_POINT_INFO(pFindNativeMethod),
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001567 ENTRY_POINT_INFO(pJniMethodStart),
1568 ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1569 ENTRY_POINT_INFO(pJniMethodEnd),
1570 ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1571 ENTRY_POINT_INFO(pJniMethodEndWithReference),
1572 ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
Elliott Hughes98e20172012-04-24 15:38:13 -07001573 ENTRY_POINT_INFO(pLockObjectFromCode),
1574 ENTRY_POINT_INFO(pUnlockObjectFromCode),
1575 ENTRY_POINT_INFO(pCmpgDouble),
1576 ENTRY_POINT_INFO(pCmpgFloat),
1577 ENTRY_POINT_INFO(pCmplDouble),
1578 ENTRY_POINT_INFO(pCmplFloat),
1579 ENTRY_POINT_INFO(pDadd),
1580 ENTRY_POINT_INFO(pDdiv),
1581 ENTRY_POINT_INFO(pDmul),
1582 ENTRY_POINT_INFO(pDsub),
1583 ENTRY_POINT_INFO(pF2d),
1584 ENTRY_POINT_INFO(pFmod),
Ian Rogers0183dd72012-09-17 23:06:51 -07001585 ENTRY_POINT_INFO(pSqrt),
Elliott Hughes98e20172012-04-24 15:38:13 -07001586 ENTRY_POINT_INFO(pI2d),
1587 ENTRY_POINT_INFO(pL2d),
1588 ENTRY_POINT_INFO(pD2f),
1589 ENTRY_POINT_INFO(pFadd),
1590 ENTRY_POINT_INFO(pFdiv),
1591 ENTRY_POINT_INFO(pFmodf),
1592 ENTRY_POINT_INFO(pFmul),
1593 ENTRY_POINT_INFO(pFsub),
1594 ENTRY_POINT_INFO(pI2f),
1595 ENTRY_POINT_INFO(pL2f),
1596 ENTRY_POINT_INFO(pD2iz),
1597 ENTRY_POINT_INFO(pF2iz),
1598 ENTRY_POINT_INFO(pIdivmod),
1599 ENTRY_POINT_INFO(pD2l),
1600 ENTRY_POINT_INFO(pF2l),
1601 ENTRY_POINT_INFO(pLdiv),
1602 ENTRY_POINT_INFO(pLdivmod),
1603 ENTRY_POINT_INFO(pLmul),
1604 ENTRY_POINT_INFO(pShlLong),
1605 ENTRY_POINT_INFO(pShrLong),
1606 ENTRY_POINT_INFO(pUshrLong),
1607 ENTRY_POINT_INFO(pIndexOf),
1608 ENTRY_POINT_INFO(pMemcmp16),
1609 ENTRY_POINT_INFO(pStringCompareTo),
1610 ENTRY_POINT_INFO(pMemcpy),
1611 ENTRY_POINT_INFO(pUnresolvedDirectMethodTrampolineFromCode),
1612 ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1613 ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
1614 ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1615 ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1616 ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1617 ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1618 ENTRY_POINT_INFO(pCheckSuspendFromCode),
1619 ENTRY_POINT_INFO(pTestSuspendFromCode),
1620 ENTRY_POINT_INFO(pDeliverException),
1621 ENTRY_POINT_INFO(pThrowAbstractMethodErrorFromCode),
1622 ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
1623 ENTRY_POINT_INFO(pThrowDivZeroFromCode),
1624 ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
1625 ENTRY_POINT_INFO(pThrowNullPointerFromCode),
1626 ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
Elliott Hughes98e20172012-04-24 15:38:13 -07001627};
1628#undef ENTRY_POINT_INFO
1629
Elliott Hughes28fa76d2012-04-09 17:31:46 -07001630void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1631 CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets.
Elliott Hughes98e20172012-04-24 15:38:13 -07001632
1633#define DO_THREAD_OFFSET(x) if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { os << # x; return; }
Ian Rogers474b6da2012-09-25 00:20:38 -07001634 DO_THREAD_OFFSET(state_and_flags_);
Elliott Hughes98e20172012-04-24 15:38:13 -07001635 DO_THREAD_OFFSET(card_table_);
1636 DO_THREAD_OFFSET(exception_);
1637 DO_THREAD_OFFSET(jni_env_);
1638 DO_THREAD_OFFSET(self_);
1639 DO_THREAD_OFFSET(stack_end_);
Elliott Hughes98e20172012-04-24 15:38:13 -07001640 DO_THREAD_OFFSET(suspend_count_);
1641 DO_THREAD_OFFSET(thin_lock_id_);
Ian Rogers0399dde2012-06-06 17:09:28 -07001642 //DO_THREAD_OFFSET(top_of_managed_stack_);
1643 //DO_THREAD_OFFSET(top_of_managed_stack_pc_);
Elliott Hughes98e20172012-04-24 15:38:13 -07001644 DO_THREAD_OFFSET(top_sirt_);
Elliott Hughes28fa76d2012-04-09 17:31:46 -07001645#undef DO_THREAD_OFFSET
Elliott Hughes98e20172012-04-24 15:38:13 -07001646
1647 size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1648 CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints));
1649 uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_);
1650 for (size_t i = 0; i < entry_point_count; ++i) {
Ian Rogers474b6da2012-09-25 00:20:38 -07001651 CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
Elliott Hughes98e20172012-04-24 15:38:13 -07001652 expected_offset += size_of_pointers;
1653 if (gThreadEntryPointInfo[i].offset == offset) {
1654 os << gThreadEntryPointInfo[i].name;
1655 return;
1656 }
1657 }
1658 os << offset;
Elliott Hughes28fa76d2012-04-09 17:31:46 -07001659}
1660
Ian Rogers0399dde2012-06-06 17:09:28 -07001661static const bool kDebugExceptionDelivery = false;
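// Walks the stack looking for a catch block that handles the pending exception; DoLongJump then
// transfers control either to that handler or to the upcall frame.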
1662class CatchBlockStackVisitor : public StackVisitor {
Ian Rogersbdb03912011-09-14 00:55:44 -07001663 public:
Ian Rogers0399dde2012-06-06 17:09:28 -07001664 CatchBlockStackVisitor(Thread* self, Throwable* exception)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001665 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogers0399dde2012-06-06 17:09:28 -07001666 : StackVisitor(self->GetManagedStack(), self->GetTraceStack(), self->GetLongJumpContext()),
1667 self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL),
1668 throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL),
1669 handler_quick_frame_pc_(0), handler_dex_pc_(0), native_method_count_(0),
Ian Rogers57b86d42012-03-27 16:05:41 -07001670 method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) {
Ian Rogers52673ff2012-06-27 23:25:34 -07001671 // Exception not in root sets, can't allow GC.
1672 last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
1673 }
1674
1675 ~CatchBlockStackVisitor() {
1676 LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
Ian Rogers67375ac2011-09-14 00:55:44 -07001677 }
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001678
Ian Rogersb726dcb2012-09-05 08:57:23 -07001679 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier66f19252012-09-18 08:57:04 -07001681 AbstractMethod* method = GetMethod();
Elliott Hughes530fa002012-03-12 11:44:49 -07001682 if (method == NULL) {
Ian Rogers0399dde2012-06-06 17:09:28 -07001683 // This is the upcall; remember the frame and last PC so that we can long jump to them.
1684 handler_quick_frame_pc_ = GetCurrentQuickFramePc();
1685 handler_quick_frame_ = GetCurrentQuickFrame();
Ian Rogers57b86d42012-03-27 16:05:41 -07001686 return false; // End stack walk.
Elliott Hughes530fa002012-03-12 11:44:49 -07001687 }
1688 uint32_t dex_pc = DexFile::kDexNoIndex;
Ian Rogers57b86d42012-03-27 16:05:41 -07001689 if (method->IsRuntimeMethod()) {
Elliott Hughes530fa002012-03-12 11:44:49 -07001690 // Ignore the callee save method.
Ian Rogers57b86d42012-03-27 16:05:41 -07001691 DCHECK(method->IsCalleeSaveMethod());
Elliott Hughes530fa002012-03-12 11:44:49 -07001692 } else {
Ian Rogers0399dde2012-06-06 17:09:28 -07001693 if (throw_method_ == NULL) {
1694 throw_method_ = method;
1695 throw_frame_id_ = GetFrameId();
1696 throw_dex_pc_ = GetDexPc();
Ian Rogers67375ac2011-09-14 00:55:44 -07001697 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001698 if (method->IsNative()) {
1699 native_method_count_++;
1700 } else {
1701 // Unwind stack when an exception occurs during method tracing
1702 if (UNLIKELY(method_tracing_active_ && IsTraceExitPc(GetCurrentQuickFramePc()))) {
buzbee8320f382012-09-11 16:29:42 -07001703 uintptr_t pc = TraceMethodUnwindFromCode(Thread::Current());
Ian Rogers0c7abda2012-09-19 13:33:42 -07001704 dex_pc = method->ToDexPc(pc);
Ian Rogers0399dde2012-06-06 17:09:28 -07001705 } else {
1706 dex_pc = GetDexPc();
1707 }
1708 }
Elliott Hughes530fa002012-03-12 11:44:49 -07001709 }
1710 if (dex_pc != DexFile::kDexNoIndex) {
1711 uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
1712 if (found_dex_pc != DexFile::kDexNoIndex) {
Ian Rogers0399dde2012-06-06 17:09:28 -07001713 handler_dex_pc_ = found_dex_pc;
Ian Rogers0c7abda2012-09-19 13:33:42 -07001714 handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
Ian Rogers0399dde2012-06-06 17:09:28 -07001715 handler_quick_frame_ = GetCurrentQuickFrame();
Ian Rogers57b86d42012-03-27 16:05:41 -07001716 return false; // End stack walk.
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001717 }
1718 }
Ian Rogers57b86d42012-03-27 16:05:41 -07001719 return true; // Continue stack walk.
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001720 }
Ian Rogersbdb03912011-09-14 00:55:44 -07001721
Ian Rogersb726dcb2012-09-05 08:57:23 -07001722 void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier66f19252012-09-18 08:57:04 -07001723 AbstractMethod* catch_method = *handler_quick_frame_;
Elliott Hughes6e9d22c2012-06-22 15:02:37 -07001724 Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_,
Ian Rogers0399dde2012-06-06 17:09:28 -07001725 catch_method, handler_dex_pc_, exception_);
1726 if (kDebugExceptionDelivery) {
1727 if (catch_method == NULL) {
1728 LOG(INFO) << "Handler is upcall";
1729 } else {
Ian Rogers4445a7e2012-10-05 17:19:13 -07001730 const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
Ian Rogers0399dde2012-06-06 17:09:28 -07001731 int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
1732 LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
1733 }
1734 }
Ian Rogers52673ff2012-06-27 23:25:34 -07001735 self_->SetException(exception_); // Exception back in root set.
1736 self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
Ian Rogers0399dde2012-06-06 17:09:28 -07001737 // Place context back on thread so it will be available when we continue.
1738 self_->ReleaseLongJumpContext(context_);
1739 context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
1740 CHECK_NE(handler_quick_frame_pc_, 0u);
1741 context_->SetPC(handler_quick_frame_pc_);
1742 context_->SmashCallerSaves();
1743 context_->DoLongJump();
1744 }
1745
1746 private:
1747 Thread* self_;
1748 Throwable* exception_;
1749 // The type of the exception catch block to find.
Ian Rogersbdb03912011-09-14 00:55:44 -07001750 Class* to_find_;
Mathieu Chartier66f19252012-09-18 08:57:04 -07001751 AbstractMethod* throw_method_;
Ian Rogers0399dde2012-06-06 17:09:28 -07001752 JDWP::FrameId throw_frame_id_;
1753 uint32_t throw_dex_pc_;
1754 // Quick frame with found handler or last frame if no handler found.
Mathieu Chartier66f19252012-09-18 08:57:04 -07001755 AbstractMethod** handler_quick_frame_;
Ian Rogers0399dde2012-06-06 17:09:28 -07001756 // PC to branch to for the handler.
1757 uintptr_t handler_quick_frame_pc_;
1758 // Associated dex PC.
1759 uint32_t handler_dex_pc_;
Ian Rogers67375ac2011-09-14 00:55:44 -07001760 // Number of native methods passed in crawl (equates to number of SIRTs to pop)
1761 uint32_t native_method_count_;
Ian Rogers57b86d42012-03-27 16:05:41 -07001762 // Is method tracing active?
1763 const bool method_tracing_active_;
Ian Rogers52673ff2012-06-27 23:25:34 -07001764 // Support for nesting no thread suspension checks.
1765 const char* last_no_assert_suspension_cause_;
Ian Rogersbdb03912011-09-14 00:55:44 -07001766};
1767
Ian Rogersff1ed472011-09-20 13:46:24 -07001768void Thread::DeliverException() {
Elliott Hughesd07986f2011-12-06 18:27:45 -08001769 Throwable* exception = GetException(); // Get exception from thread
Ian Rogersff1ed472011-09-20 13:46:24 -07001770 CHECK(exception != NULL);
Ian Rogers28ad40d2011-10-27 15:19:26 -07001771 // Don't leave exception visible while we try to find the handler, which may cause class
Elliott Hughesd07986f2011-12-06 18:27:45 -08001772 // resolution.
Ian Rogers28ad40d2011-10-27 15:19:26 -07001773 ClearException();
1774 if (kDebugExceptionDelivery) {
Ian Rogersa32a6fd2012-02-06 20:18:44 -08001775 String* msg = exception->GetDetailMessage();
1776 std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
1777 DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
Elliott Hughesc073b072012-05-24 19:29:17 -07001778 << ": " << str_msg << "\n");
Ian Rogers28ad40d2011-10-27 15:19:26 -07001779 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001780 CatchBlockStackVisitor catch_finder(this, exception);
1781 catch_finder.WalkStack(true);
1782 catch_finder.DoLongJump();
Ian Rogers9a8a8882012-03-08 02:30:55 -08001783 LOG(FATAL) << "UNREACHABLE";
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001784}
1785
Ian Rogersbdb03912011-09-14 00:55:44 -07001786Context* Thread::GetLongJumpContext() {
Elliott Hughes85d15452011-09-16 17:33:01 -07001787 Context* result = long_jump_context_;
Ian Rogersbdb03912011-09-14 00:55:44 -07001788 if (result == NULL) {
1789 result = Context::Create();
Ian Rogers0399dde2012-06-06 17:09:28 -07001790 } else {
1791 long_jump_context_ = NULL; // Avoid context being shared.
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001792 }
Ian Rogersbdb03912011-09-14 00:55:44 -07001793 return result;
Shih-wei Liao1a18c8c2011-08-14 17:47:36 -07001794}
1795
Mathieu Chartier66f19252012-09-18 08:57:04 -07001796AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const {
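  // Walks down the stack to the first non-runtime frame and records its method, dex PC and frame id.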
Ian Rogers0399dde2012-06-06 17:09:28 -07001797 struct CurrentMethodVisitor : public StackVisitor {
1798 CurrentMethodVisitor(const ManagedStack* stack,
Ian Rogersca190662012-06-26 15:45:57 -07001799 const std::vector<TraceStackFrame>* trace_stack)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001800 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Elliott Hughes08fc03a2012-06-26 17:34:00 -07001801 : StackVisitor(stack, trace_stack, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {}
Elliott Hughes8be2d402012-02-23 14:22:41 -08001802
Ian Rogersb726dcb2012-09-05 08:57:23 -07001803 virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartier66f19252012-09-18 08:57:04 -07001804 AbstractMethod* m = GetMethod();
Ian Rogers0399dde2012-06-06 17:09:28 -07001805 if (m->IsRuntimeMethod()) {
1806 // Continue if this is a runtime method.
1807 return true;
1808 }
1809 method_ = m;
1810 dex_pc_ = GetDexPc();
1811 frame_id_ = GetFrameId();
1812 return false;
1813 }
Mathieu Chartier66f19252012-09-18 08:57:04 -07001814 AbstractMethod* method_;
Ian Rogers0399dde2012-06-06 17:09:28 -07001815 uint32_t dex_pc_;
1816 size_t frame_id_;
1817 };
1818
1819 CurrentMethodVisitor visitor(GetManagedStack(), GetTraceStack());
1820 visitor.WalkStack(false);
1821 if (dex_pc != NULL) {
1822 *dex_pc = visitor.dex_pc_;
Elliott Hughes9fd66f52011-10-16 12:13:26 -07001823 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001824 if (frame_id != NULL) {
1825 *frame_id = visitor.frame_id_;
jeffhao33dc7712011-11-09 17:54:24 -08001826 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001827 return visitor.method_;
jeffhao33dc7712011-11-09 17:54:24 -08001828}
1829
Elliott Hughes5f791332011-09-15 17:45:30 -07001830bool Thread::HoldsLock(Object* object) {
1831 if (object == NULL) {
1832 return false;
1833 }
Brian Carlstrom24a3c2e2011-10-17 18:07:52 -07001834 return object->GetThinLockId() == thin_lock_id_;
Elliott Hughes5f791332011-09-15 17:45:30 -07001835}
1836
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001837// Visitor parameters are: (const Object* obj, size_t vreg, const AbstractMethod* method).
1838template <typename Visitor>
Ian Rogers0399dde2012-06-06 17:09:28 -07001839class ReferenceMapVisitor : public StackVisitor {
Ian Rogersd6b1f612011-09-27 13:38:14 -07001840 public:
Ian Rogers0399dde2012-06-06 17:09:28 -07001841 ReferenceMapVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001842 Context* context, const Visitor& visitor)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001843 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001844 : StackVisitor(stack, trace_stack, context), visitor_(visitor) {}
Ian Rogersd6b1f612011-09-27 13:38:14 -07001845
Ian Rogersb726dcb2012-09-05 08:57:23 -07001846 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
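    // Flip the 'false' below to true for verbose logging of each visited frame.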
Brian Carlstrom6a4be3a2011-10-20 16:34:03 -07001847 if (false) {
Ian Rogers0399dde2012-06-06 17:09:28 -07001848 LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
1849 << StringPrintf("@ PC:%04x", GetDexPc());
Brian Carlstrom6a4be3a2011-10-20 16:34:03 -07001850 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001851 ShadowFrame* shadow_frame = GetCurrentShadowFrame();
1852 if (shadow_frame != NULL) {
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001853 WrapperVisitor wrapperVisitor(visitor_, shadow_frame->GetMethod());
1854 shadow_frame->VisitRoots(wrapperVisitor);
Ian Rogers0399dde2012-06-06 17:09:28 -07001855 } else {
Mathieu Chartier66f19252012-09-18 08:57:04 -07001856 AbstractMethod* m = GetMethod();
Ian Rogers0399dde2012-06-06 17:09:28 -07001857 // Process register map (which native and runtime methods don't have)
Ian Rogers640495b2012-06-22 15:15:47 -07001858 if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
Ian Rogers0c7abda2012-09-19 13:33:42 -07001859 const uint8_t* native_gc_map = m->GetNativeGcMap();
1860 CHECK(native_gc_map != NULL) << PrettyMethod(m);
1861 mh_.ChangeMethod(m);
1862 const DexFile::CodeItem* code_item = mh_.GetCodeItem();
Elliott Hughescaf76542012-06-28 16:08:22 -07001863 DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
Ian Rogers0c7abda2012-09-19 13:33:42 -07001864 NativePcOffsetToReferenceMap map(native_gc_map);
Ian Rogers0399dde2012-06-06 17:09:28 -07001865 size_t num_regs = std::min(map.RegWidth() * 8,
1866 static_cast<size_t>(code_item->registers_size_));
Ian Rogers0c7abda2012-09-19 13:33:42 -07001867 if (num_regs > 0) {
1868 const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
1869 DCHECK(reg_bitmap != NULL);
1870 const VmapTable vmap_table(m->GetVmapTableRaw());
1871 uint32_t core_spills = m->GetCoreSpillMask();
1872 uint32_t fp_spills = m->GetFpSpillMask();
1873 size_t frame_size = m->GetFrameSizeInBytes();
1874 // For all dex registers in the bitmap
Mathieu Chartier66f19252012-09-18 08:57:04 -07001875 AbstractMethod** cur_quick_frame = GetCurrentQuickFrame();
Ian Rogers0c7abda2012-09-19 13:33:42 -07001876 DCHECK(cur_quick_frame != NULL);
1877 for (size_t reg = 0; reg < num_regs; ++reg) {
1878 // Does this register hold a reference?
1879 if (TestBitmap(reg, reg_bitmap)) {
1880 uint32_t vmap_offset;
1881 Object* ref;
1882 if (vmap_table.IsInContext(reg, vmap_offset)) {
1883 // Compute the register we need to load from the context
1884 uint32_t spill_mask = core_spills;
1885 CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
1886 uint32_t matches = 0;
1887 uint32_t spill_shifts = 0;
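            // Scan the spill mask for the (vmap_offset + 1)-th set bit; its position is the
            // callee save register holding this dex register.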
1888 while (matches != (vmap_offset + 1)) {
1889 DCHECK_NE(spill_mask, 0u);
1890 matches += spill_mask & 1; // Add 1 if the low bit is set
1891 spill_mask >>= 1;
1892 spill_shifts++;
1893 }
1894 spill_shifts--; // wind back one as we want the last match
1895 ref = reinterpret_cast<Object*>(GetGPR(spill_shifts));
1896 } else {
1897 ref = reinterpret_cast<Object*>(GetVReg(cur_quick_frame, code_item, core_spills,
1898 fp_spills, frame_size, reg));
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001899 }
1900
1901 if (ref != NULL) {
1902 visitor_(ref, reg, m);
Ian Rogers0c7abda2012-09-19 13:33:42 -07001903 }
Ian Rogers0399dde2012-06-06 17:09:28 -07001904 }
Shih-wei Liao4f894e32011-09-27 21:33:19 -07001905 }
Ian Rogersd6b1f612011-09-27 13:38:14 -07001906 }
1907 }
1908 }
Elliott Hughes530fa002012-03-12 11:44:49 -07001909 return true;
Ian Rogersd6b1f612011-09-27 13:38:14 -07001910 }
1911
1912 private:
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001913
1914 class WrapperVisitor {
1915 public:
 1916 WrapperVisitor(const Visitor& visitor, AbstractMethod* method)
 1917 : visitor_(visitor), method_(method) {}
1921
1922 void operator()(const Object* obj, size_t offset) const {
1923 visitor_(obj, offset, method_);
1924 }
1925
1926 private:
1927 const Visitor& visitor_;
1928 AbstractMethod* method_;
1929 };
1930
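// Returns whether bit 'reg' is set in the GC map's register bitmap.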
1931 static bool TestBitmap(int reg, const uint8_t* reg_vector) {
Ian Rogersd6b1f612011-09-27 13:38:14 -07001932 return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
1933 }
1934
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001935 // Visitor for when we visit a root.
1936 const Visitor& visitor_;
1937
Ian Rogers0c7abda2012-09-19 13:33:42 -07001938 // A method helper we keep around to avoid dex file/cache re-computations.
1939 MethodHelper mh_;
Ian Rogersd6b1f612011-09-27 13:38:14 -07001940};
1941
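// Adapts a Heap::RootVisitor, which takes (object, arg), to the (object, vreg, method) signature
// invoked by ReferenceMapVisitor.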
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07001942class RootCallbackVisitor {
1943 public:
 1944 RootCallbackVisitor(Heap::RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
1947
1948 void operator()(const Object* obj, size_t, const AbstractMethod*) const {
1949 visitor_(obj, arg_);
1950 }
1951
1952 private:
1953 Heap::RootVisitor* visitor_;
1954 void* arg_;
1955};
1956
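// Forwards each root to a Heap::VerifyRootVisitor together with the vreg and method, so that
// verification failures can be attributed to a specific frame.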
1957class VerifyCallbackVisitor {
1958 public:
 1959 VerifyCallbackVisitor(Heap::VerifyRootVisitor* visitor, void* arg)
 1960 : visitor_(visitor), arg_(arg) {}
1964
1965 void operator()(const Object* obj, size_t vreg, const AbstractMethod* method) const {
1966 visitor_(obj, arg_, vreg, method);
1967 }
1968
1969 private:
1970 Heap::VerifyRootVisitor* visitor_;
1971 void* arg_;
1972};
1973
1974struct VerifyRootWrapperArg {
1975 Heap::VerifyRootVisitor* visitor;
1976 void* arg;
1977};
1978
1979static void VerifyRootWrapperCallback(const Object* root, void* arg) {
1980 VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
1981 wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
1982}
1983
1984void Thread::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) {
1985 // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
1986 // don't have.
1987 VerifyRootWrapperArg wrapperArg;
1988 wrapperArg.arg = arg;
1989 wrapperArg.visitor = visitor;
1990
1991 if (exception_ != NULL) {
1992 VerifyRootWrapperCallback(exception_, &wrapperArg);
1993 }
1994 if (class_loader_override_ != NULL) {
1995 VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
1996 }
1997 jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
1998 jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
1999
2000 SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2001
2002 // Visit roots on this thread's stack
2003 Context* context = GetLongJumpContext();
2004 VerifyCallbackVisitor visitorToCallback(visitor, arg);
2005 ReferenceMapVisitor<VerifyCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context,
2006 visitorToCallback);
2007 mapper.WalkStack();
2008 ReleaseLongJumpContext(context);
2009}
2010
Ian Rogersd6b1f612011-09-27 13:38:14 -07002011void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
Elliott Hughesd369bb72011-09-12 14:41:14 -07002012 if (exception_ != NULL) {
2013 visitor(exception_, arg);
2014 }
Brian Carlstrom40381fb2011-10-19 14:13:40 -07002015 if (class_loader_override_ != NULL) {
2016 visitor(class_loader_override_, arg);
2017 }
Elliott Hughes410c0c82011-09-01 17:58:25 -07002018 jni_env_->locals.VisitRoots(visitor, arg);
2019 jni_env_->monitors.VisitRoots(visitor, arg);
Shih-wei Liao8dfc9d52011-09-28 18:06:15 -07002020
2021 SirtVisitRoots(visitor, arg);
2022
Ian Rogersd6b1f612011-09-27 13:38:14 -07002023 // Visit roots on this thread's stack
Ian Rogers0399dde2012-06-06 17:09:28 -07002024 Context* context = GetLongJumpContext();
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07002025 RootCallbackVisitor visitorToCallback(visitor, arg);
2026 ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context,
2027 visitorToCallback);
Ian Rogers0399dde2012-06-06 17:09:28 -07002028 mapper.WalkStack();
2029 ReleaseLongJumpContext(context);
Elliott Hughes410c0c82011-09-01 17:58:25 -07002030}
2031
jeffhao25045522012-03-13 19:34:37 -07002032#if VERIFY_OBJECT_ENABLED
Ian Rogers0399dde2012-06-06 17:09:28 -07002033static void VerifyObject(const Object* obj, void* arg) {
2034 Heap* heap = reinterpret_cast<Heap*>(arg);
2035 heap->VerifyObject(obj);
jeffhao25045522012-03-13 19:34:37 -07002036}
2037
2038void Thread::VerifyStack() {
jeffhaoe66ac792012-03-19 16:08:46 -07002039 UniquePtr<Context> context(Context::Create());
jeffhao4eb68ed2012-10-17 16:41:07 -07002040 RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
Mathieu Chartier6f1c9492012-10-15 12:08:41 -07002041 ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetTraceStack(), context.get(),
jeffhao4eb68ed2012-10-17 16:41:07 -07002042 visitorToCallback);
Ian Rogers0399dde2012-06-06 17:09:28 -07002043 mapper.WalkStack();
jeffhao25045522012-03-13 19:34:37 -07002044}
2045#endif
2046
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002047// Set the stack end to that to be used during a stack overflow
2048void Thread::SetStackEndForStackOverflow() {
2049 // During stack overflow we allow use of the full stack
2050 if (stack_end_ == stack_begin_) {
2051 DumpStack(std::cerr);
2052 LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
2053 << kStackOverflowReservedBytes << ")";
2054 }
2055
2056 stack_end_ = stack_begin_;
2057}
2058
Elliott Hughes330304d2011-08-12 14:28:05 -07002059std::ostream& operator<<(std::ostream& os, const Thread& thread) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002060 thread.ShortDump(os);
Elliott Hughes330304d2011-08-12 14:28:05 -07002061 return os;
2062}
2063
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002064#ifndef NDEBUG
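// Debug-only check that this thread may legitimately be suspended here: no
// StartAssertNoThreadSuspension region is active and, when check_locks is set, no mutex other
// than the mutator lock is held.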
2065void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
2066 CHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_;
2067 if (check_locks) {
2068 bool bad_mutexes_held = false;
2069 for (int i = kMaxMutexLevel; i >= 0; --i) {
2070 // We expect no locks except the mutator_lock_.
2071 if (i != kMutatorLock) {
Ian Rogers81d425b2012-09-27 16:03:43 -07002072 BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002073 if (held_mutex != NULL) {
2074 LOG(ERROR) << "holding \"" << held_mutex->GetName()
2075 << "\" at point where thread suspension is expected";
Elliott Hughesffb465f2012-03-01 18:46:05 -08002076 bad_mutexes_held = true;
2077 }
2078 }
Elliott Hughesffb465f2012-03-01 18:46:05 -08002079 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002080 CHECK(!bad_mutexes_held);
Elliott Hughesffb465f2012-03-01 18:46:05 -08002081 }
2082}
Ian Rogers00f7d0e2012-07-19 15:28:27 -07002083#endif
Elliott Hughesa4060e52012-03-02 16:51:35 -08002084
Elliott Hughes8daa0922011-09-11 13:46:25 -07002085} // namespace art