More of the thread infrastructure.
We can now run managed code on multiple threads.
Change-Id: Ia4ce9c94602773db238c967c15194a6db780d12f
diff --git a/src/heap.cc b/src/heap.cc
index e346478..6228fcc 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -414,7 +414,7 @@
}
void Heap::Lock() {
- // TODO: grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
+ // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks like
// we're going to have to wait on the mutex.
lock_->Lock();
}
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index 0129fc5..3b8eeff 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -508,7 +508,7 @@
LOG(INFO) << "[" << *self << " waiting for \"" << path_ << "\" "
<< "JNI_OnLoad...]";
}
- ScopedThreadStateChange tsc(self, Thread::kWaiting); // TODO: VMWAIT
+ ScopedThreadStateChange tsc(self, Thread::kVmWait);
pthread_cond_wait(&jni_on_load_cond_, jni_on_load_lock_.GetImpl());
}
@@ -2751,12 +2751,12 @@
// TODO: automate some of these checks!
// This can execute slowly for a large library on a busy system, so we
- // want to switch from RUNNING to VMWAIT while it executes. This allows
+ // want to switch from kRunnable to kVmWait while it executes. This allows
// the GC to ignore us.
Thread* self = Thread::Current();
void* handle = NULL;
{
- ScopedThreadStateChange tsc(self, Thread::kWaiting); // TODO: VMWAIT
+ ScopedThreadStateChange tsc(self, Thread::kVmWait);
handle = dlopen(path.c_str(), RTLD_LAZY);
}
diff --git a/src/macros.h b/src/macros.h
index 047a95e..36afd27 100644
--- a/src/macros.h
+++ b/src/macros.h
@@ -120,4 +120,7 @@
&reinterpret_cast<t*>(16)->f) - \
reinterpret_cast<char*>(16))
+#define OFFSETOF_VOLATILE_MEMBER(t, f) \
+ (reinterpret_cast<volatile char*>(&reinterpret_cast<t*>(16)->f) - reinterpret_cast<volatile char*>(16))
+
#endif // ART_SRC_MACROS_H_
diff --git a/src/runtime.cc b/src/runtime.cc
index a1157e6..706d793 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -40,9 +40,11 @@
// Make sure our internal threads are dead before we start tearing down things they're using.
delete signal_catcher_;
+ // Make sure all other threads have terminated too.
+ delete thread_list_;
+
delete class_linker_;
Heap::Destroy();
- delete thread_list_;
delete intern_table_;
delete java_vm_;
Thread::Shutdown();
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index 7061b36..08db89a 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -91,7 +91,7 @@
}
int WaitForSignal(Thread* thread, sigset_t& mask) {
- ScopedThreadStateChange tsc(thread, Thread::kWaiting); // TODO: VMWAIT
+ ScopedThreadStateChange tsc(thread, Thread::kVmWait);
// Signals for sigwait() must be blocked but not ignored. We
// block signals like SIGQUIT for all threads, so the condition
diff --git a/src/thread.cc b/src/thread.cc
index 05b4b9b..0a0f6f5 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -324,11 +324,56 @@
return *reinterpret_cast<Method**>(next_sp);
}
-void* ThreadStart(void *arg) {
- UNIMPLEMENTED(FATAL);
+void* Thread::CreateCallback(void *arg) {
+ Thread* self = reinterpret_cast<Thread*>(arg);
+ Runtime* runtime = Runtime::Current();
+
+ self->Attach(runtime);
+
+ ClassLinker* class_linker = runtime->GetClassLinker();
+
+ Class* thread_class = class_linker->FindSystemClass("Ljava/lang/Thread;");
+ Class* string_class = class_linker->FindSystemClass("Ljava/lang/String;");
+
+ Field* name_field = thread_class->FindDeclaredInstanceField("name", string_class);
+ String* thread_name = reinterpret_cast<String*>(name_field->GetObject(self->peer_));
+ if (thread_name != NULL) {
+ SetThreadName(thread_name->ToModifiedUtf8().c_str());
+ }
+
+ // Wait until it's safe to start running code. (There may have been a suspend-all
+ // in progress while we were starting up.)
+ runtime->GetThreadList()->WaitForGo();
+
+ // TODO: say "hi" to the debugger.
+ //if (gDvm.debuggerConnected) {
+ // dvmDbgPostThreadStart(self);
+ //}
+
+ // Invoke the 'run' method of our java.lang.Thread.
+ CHECK(self->peer_ != NULL);
+ Object* receiver = self->peer_;
+ Method* Thread_run = thread_class->FindVirtualMethod("run", "()V");
+ Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(Thread_run);
+ m->Invoke(self, receiver, NULL, NULL);
+
+ // Detach.
+ runtime->GetThreadList()->Unregister();
+
return NULL;
}
+void SetVmData(Object* managed_thread, Thread* native_thread) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ Class* thread_class = class_linker->FindSystemClass("Ljava/lang/Thread;");
+ Class* int_class = class_linker->FindPrimitiveClass('I');
+
+ Field* vmData_field = thread_class->FindDeclaredInstanceField("vmData", int_class);
+
+ vmData_field->SetInt(managed_thread, reinterpret_cast<uintptr_t>(native_thread));
+}
+
void Thread::Create(Object* peer, size_t stack_size) {
CHECK(peer != NULL);
@@ -336,8 +381,12 @@
stack_size = Runtime::Current()->GetDefaultStackSize();
}
- Thread* self = new Thread;
- self->peer_ = peer;
+ Thread* native_thread = new Thread;
+ native_thread->peer_ = peer;
+
+ // Thread.start is synchronized, so we know that vmData is 0,
+ // and know that we're not racing to assign it.
+ SetVmData(peer, native_thread);
pthread_attr_t attr;
errno = pthread_attr_init(&attr);
@@ -355,7 +404,7 @@
PLOG(FATAL) << "pthread_attr_setstacksize(" << stack_size << ") failed";
}
- errno = pthread_create(&self->pthread_, &attr, ThreadStart, self);
+ errno = pthread_create(&native_thread->pthread_, &attr, Thread::CreateCallback, native_thread);
if (errno != 0) {
PLOG(FATAL) << "pthread_create failed";
}
@@ -364,28 +413,39 @@
if (errno != 0) {
PLOG(FATAL) << "pthread_attr_destroy failed";
}
+
+ // Let the child know when it's safe to start running.
+ Runtime::Current()->GetThreadList()->SignalGo(native_thread);
}
-Thread* Thread::Attach(const Runtime* runtime, const char* name, bool as_daemon) {
- Thread* self = new Thread;
+void Thread::Attach(const Runtime* runtime) {
+ InitCpu();
+ InitFunctionPointers();
- self->tid_ = ::art::GetTid();
- self->pthread_ = pthread_self();
+ thin_lock_id_ = Runtime::Current()->GetThreadList()->AllocThreadId();
- self->InitStackHwm();
+ tid_ = ::art::GetTid();
+ pthread_ = pthread_self();
- self->state_ = kRunnable;
+ InitStackHwm();
- SetThreadName(name);
-
- errno = pthread_setspecific(Thread::pthread_key_self_, self);
+ errno = pthread_setspecific(Thread::pthread_key_self_, this);
if (errno != 0) {
PLOG(FATAL) << "pthread_setspecific failed";
}
- self->jni_env_ = new JNIEnvExt(self, runtime->GetJavaVM());
+ jni_env_ = new JNIEnvExt(this, runtime->GetJavaVM());
- runtime->GetThreadList()->Register(self);
+ runtime->GetThreadList()->Register(this);
+}
+
+Thread* Thread::Attach(const Runtime* runtime, const char* name, bool as_daemon) {
+ Thread* self = new Thread;
+ self->Attach(runtime);
+
+ self->SetState(Thread::kRunnable);
+
+ SetThreadName(name);
// If we're the main thread, ClassLinker won't be created until after we're attached,
// so that thread needs a two-stage attach. Regular threads don't need this hack.
@@ -560,7 +620,7 @@
}
os << " prio=" << priority
<< " tid=" << GetThinLockId()
- << " " << state_ << "\n";
+ << " " << GetState() << "\n";
int suspend_count = 0; // TODO
int debug_suspend_count = 0; // TODO
@@ -674,17 +734,15 @@
native_to_managed_record_(NULL),
top_sirt_(NULL),
jni_env_(NULL),
+ state_(Thread::kUnknown),
exception_(NULL),
suspend_count_(0),
class_loader_override_(NULL) {
- InitCpu();
- InitFunctionPointers();
- thin_lock_id_ = Runtime::Current()->GetThreadList()->AllocThreadId();
}
void MonitorExitVisitor(const Object* object, void*) {
Object* entered_monitor = const_cast<Object*>(object);
- entered_monitor->MonitorExit();;
+ entered_monitor->MonitorExit();
}
Thread::~Thread() {
@@ -692,7 +750,9 @@
// a call stack that includes managed frames. (It's only valid if the stack is all-native.)
// On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
- jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL);
+ if (jni_env_ != NULL) {
+ jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL);
+ }
if (IsExceptionPending()) {
UNIMPLEMENTED(FATAL) << "threadExitUncaughtException()";
@@ -700,11 +760,13 @@
// TODO: ThreadGroup.removeThread(this);
- // TODO: this.vmData = 0;
+ if (peer_ != NULL) {
+ SetVmData(peer_, NULL);
+ }
// TODO: say "bye" to the debugger.
//if (gDvm.debuggerConnected) {
- // dvmDbgPostThreadDeath(self);
+ // dvmDbgPostThreadDeath(self);
//}
// Thread.join() is implemented as an Object.wait() on the Thread.lock
@@ -1041,19 +1103,23 @@
}
static const char* kStateNames[] = {
- "New",
+ "Terminated",
"Runnable",
+ "TimedWaiting",
"Blocked",
"Waiting",
- "TimedWaiting",
+ "Initializing",
+ "Starting",
"Native",
- "Terminated",
+ "VmWait",
+ "Suspended",
};
std::ostream& operator<<(std::ostream& os, const Thread::State& state) {
- if (state >= Thread::kNew && state <= Thread::kTerminated) {
- os << kStateNames[state-Thread::kNew];
+ int int_state = static_cast<int>(state);
+ if (state >= Thread::kTerminated && state <= Thread::kSuspended) {
+ os << kStateNames[int_state];
} else {
- os << "State[" << static_cast<int>(state) << "]";
+ os << "State[" << int_state << "]";
}
return os;
}
diff --git a/src/thread.h b/src/thread.h
index 9717a7c..0d79019 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -129,13 +129,19 @@
};
enum State {
kUnknown = -1,
- kNew,
- kRunnable,
- kBlocked,
- kWaiting,
- kTimedWaiting,
- kNative,
- kTerminated,
+
+ // These match up with JDWP values.
+ kTerminated = 0, // TERMINATED
+ kRunnable = 1, // RUNNABLE or running now
+ kTimedWaiting = 2, // TIMED_WAITING in Object.wait()
+ kBlocked = 3, // BLOCKED on a monitor
+ kWaiting = 4, // WAITING in Object.wait()
+ // Non-JDWP states.
+ kInitializing = 5, // allocated, not yet running --- TODO: unnecessary?
+ kStarting = 6, // native thread started, not yet ready to run managed code
+ kNative = 7, // off in a JNI native method
+ kVmWait = 8, // waiting on a VM resource
+ kSuspended = 9, // suspended, usually by GC or debugger
};
static const size_t kStackOverflowReservedBytes = 1024; // Space to throw a StackOverflowError in.
@@ -435,7 +441,7 @@
}
static ThreadOffset StateOffset() {
- return ThreadOffset(OFFSETOF_MEMBER(Thread, state_));
+ return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
}
static ThreadOffset StackEndOffset() {
@@ -474,6 +480,9 @@
void DumpState(std::ostream& os) const;
void DumpStack(std::ostream& os) const;
+ void Attach(const Runtime* runtime);
+ static void* CreateCallback(void* arg);
+
void InitCpu();
void InitFunctionPointers();
void InitStackHwm();
@@ -527,7 +536,7 @@
// Every thread may have an associated JNI environment
JNIEnvExt* jni_env_;
- State state_;
+ volatile State state_;
// Initialized to "this". On certain architectures (such as x86) reading
// off of Thread::Current is easy but getting the address of Thread::Current
diff --git a/src/thread_list.cc b/src/thread_list.cc
index b0626c7..e3e4789 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -18,6 +18,8 @@
namespace art {
+pthread_cond_t ThreadList::thread_start_cond_ = PTHREAD_COND_INITIALIZER;
+
ThreadList::ThreadList() : lock_("ThreadList lock") {
}
@@ -57,7 +59,7 @@
void ThreadList::Unregister() {
Thread* self = Thread::Current();
- //LOG(INFO) << "ThreadList::Unregister() " << self;
+ //LOG(INFO) << "ThreadList::Unregister() " << *self;
MutexLock mu(lock_);
// Remove this thread from the list.
@@ -85,6 +87,59 @@
}
}
+/*
+ * Tell a new thread it's safe to start.
+ *
+ * We must hold the thread list lock before messing with another thread.
+ * In the general case we would also need to verify that the new thread was
+ * still in the thread list, but in our case the thread has not started
+ * executing user code and therefore has not had a chance to exit.
+ *
+ * We move it to kVmWait, and it then shifts itself to kRunnable, which
+ * comes with a suspend-pending check. We do this after the child has told us it is registered in the thread list.
+ */
+void ThreadList::SignalGo(Thread* child) {
+ Thread* self = Thread::Current();
+ CHECK(child != self);
+
+ {
+ MutexLock mu(lock_);
+
+ // We wait for the child to tell us that it's in the thread list.
+ while (child->GetState() != Thread::kStarting) {
+ pthread_cond_wait(&thread_start_cond_, lock_.GetImpl());
+ }
+ }
+
+ // If we switch out of runnable and then back in, we know there's no pending suspend.
+ self->SetState(Thread::kVmWait);
+ self->SetState(Thread::kRunnable);
+
+ // Tell the child that it's safe: it will see any future suspend request.
+ child->SetState(Thread::kVmWait);
+ pthread_cond_broadcast(&thread_start_cond_);
+}
+
+void ThreadList::WaitForGo() {
+ Thread* self = Thread::Current();
+ DCHECK(Contains(self));
+
+ MutexLock mu(lock_);
+
+ // Tell our parent that we're in the thread list.
+ self->SetState(Thread::kStarting);
+ pthread_cond_broadcast(&thread_start_cond_);
+
+ // Wait until our parent tells us there's no suspend still pending
+ // from before we were on the thread list.
+ while (self->GetState() != Thread::kVmWait) {
+ pthread_cond_wait(&thread_start_cond_, lock_.GetImpl());
+ }
+
+ // Enter the runnable state. We know that any pending suspend will affect us now.
+ self->SetState(Thread::kRunnable);
+}
+
uint32_t ThreadList::AllocThreadId() {
MutexLock mu(lock_);
for (size_t i = 0; i < allocated_ids_.size(); ++i) {
diff --git a/src/thread_list.h b/src/thread_list.h
index 12fdde4..df9eb7e 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -41,6 +41,9 @@
void VisitRoots(Heap::RootVisitor* visitor, void* arg) const;
+ void SignalGo(Thread* child);
+ void WaitForGo();
+
private:
uint32_t AllocThreadId();
void ReleaseThreadId(uint32_t id);
@@ -49,6 +52,8 @@
std::bitset<kMaxThreadId> allocated_ids_;
std::list<Thread*> list_;
+ static pthread_cond_t thread_start_cond_;
+
friend class Thread;
friend class ThreadListLock;
@@ -64,7 +69,7 @@
}
Thread::State old_state;
if (self != NULL) {
- old_state = self->SetState(Thread::kWaiting); // TODO: VMWAIT
+ old_state = self->SetState(Thread::kVmWait);
} else {
// This happens during VM shutdown.
old_state = Thread::kUnknown;