Merge "Wire up hprof." into dalvik-dev
diff --git a/src/dalvik_system_VMStack.cc b/src/dalvik_system_VMStack.cc
index 5a242f9..d87bc2b 100644
--- a/src/dalvik_system_VMStack.cc
+++ b/src/dalvik_system_VMStack.cc
@@ -26,7 +26,7 @@
 namespace {
 
 static jobject GetThreadStack(JNIEnv* env, jobject javaThread) {
-  ThreadListLock thread_list_lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   return (thread != NULL) ? GetThreadStack(env, thread) : NULL;
 }
diff --git a/src/debugger.cc b/src/debugger.cc
index 9f4bc4d..828ee4a 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -833,7 +833,7 @@
   // We lock the thread list to avoid sending duplicate events or missing
   // a thread change. We should be okay holding this lock while sending
   // the messages out. (We have to hold it while accessing a live thread.)
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
 
   gDdmThreadNotification = enable;
   if (enable) {
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index e3b79d8..bcce5b6 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -75,10 +75,16 @@
 
   ~FileJanitor() {
     if (fd_ != -1) {
-      flock(fd_, LOCK_UN);
+      int rc = TEMP_FAILURE_RETRY(flock(fd_, LOCK_UN));
+      if (rc == -1) {
+        PLOG(ERROR) << "Failed to unlock " << filename_;
+      }
     }
     if (do_unlink_) {
-      unlink(filename_.c_str());
+      int rc = TEMP_FAILURE_RETRY(unlink(filename_.c_str()));
+      if (rc == -1) {
+        PLOG(ERROR) << "Failed to unlink " << filename_;
+      }
     }
   }
 
@@ -188,28 +194,29 @@
   // Handles removing the file on failure and unlocking on both failure and success.
   FileJanitor file_janitor(oat_filename, fd);
 
-  // TODO: TEMP_FAILURE_RETRY on flock calls
-
   // If we won the creation race, block trying to take the lock (since we're going to be doing
   // the work, we need the lock). If we lost the creation race, spin trying to take the lock
   // non-blocking until we fail -- at which point we know the other guy has the lock -- and then
   // block trying to take the now-taken lock.
   if (did_create) {
     LOG(INFO) << "This process created " << oat_filename;
-    while (flock(fd, LOCK_EX) != 0) {
+    while (TEMP_FAILURE_RETRY(flock(fd, LOCK_EX)) != 0) {
       // Try again.
     }
     LOG(INFO) << "This process created and locked " << oat_filename;
   } else {
     LOG(INFO) << "Another process has already created " << oat_filename;
-    while (flock(fd, LOCK_EX | LOCK_NB) == 0) {
+    while (TEMP_FAILURE_RETRY(flock(fd, LOCK_EX | LOCK_NB)) == 0) {
       // Give up the lock and hope the creator has taken the lock next time round.
-      flock(fd, LOCK_UN);
+      int rc = TEMP_FAILURE_RETRY(flock(fd, LOCK_UN));
+      if (rc == -1) {
+        PLOG(FATAL) << "Failed to unlock " << oat_filename;
+      }
     }
     // Now a non-blocking attempt to take the lock has failed, we know the other guy has the
     // lock, so block waiting to take it.
     LOG(INFO) << "Another process is already working on " << oat_filename;
-    if (flock(fd, LOCK_EX) != 0) {
+    if (TEMP_FAILURE_RETRY(flock(fd, LOCK_EX)) != 0) {
       PLOG(ERROR) << "Waiter unable to wait for creator to finish " << oat_filename;
       return EXIT_FAILURE;
     }
diff --git a/src/java_lang_Thread.cc b/src/java_lang_Thread.cc
index 891238e..9031471 100644
--- a/src/java_lang_Thread.cc
+++ b/src/java_lang_Thread.cc
@@ -35,7 +35,7 @@
 }
 
 jboolean Thread_isInterrupted(JNIEnv* env, jobject javaThread) {
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
 }
@@ -46,7 +46,7 @@
 }
 
 jint Thread_nativeGetStatus(JNIEnv* env, jobject javaThread) {
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   if (thread == NULL) {
     return -1;
@@ -61,13 +61,13 @@
     Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null");
     return JNI_FALSE;
   }
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   return thread->HoldsLock(object);
 }
 
 void Thread_nativeInterrupt(JNIEnv* env, jobject javaThread) {
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   if (thread != NULL) {
     thread->Interrupt();
@@ -75,7 +75,7 @@
 }
 
 void Thread_nativeSetName(JNIEnv* env, jobject javaThread, jstring javaName) {
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   if (thread != NULL) {
     ScopedUtfChars name(env, javaName);
@@ -96,7 +96,7 @@
  * threads at Thread.NORM_PRIORITY (5).
  */
 void Thread_nativeSetPriority(JNIEnv* env, jobject javaThread, jint newPriority) {
-  ThreadListLock lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = Thread::FromManagedThread(env, javaThread);
   if (thread != NULL) {
     thread->SetNativePriority(newPriority);
diff --git a/src/mutex.cc b/src/mutex.cc
index 25d8b76..0dfab11 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -27,19 +27,12 @@
 namespace art {
 
 Mutex::Mutex(const char* name) : name_(name) {
-#ifndef NDEBUG
-  pthread_mutexattr_t debug_attributes;
-  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&debug_attributes));
-#if VERIFY_OBJECT_ENABLED
-  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&debug_attributes, PTHREAD_MUTEX_RECURSIVE));
-#else
-  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&debug_attributes, PTHREAD_MUTEX_ERRORCHECK));
-#endif
-  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &debug_attributes));
-  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&debug_attributes));
-#else
-  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, NULL));
-#endif
+  // Like Java, we use recursive mutexes.
+  pthread_mutexattr_t attributes;
+  CHECK_MUTEX_CALL(pthread_mutexattr_init, (&attributes));
+  CHECK_MUTEX_CALL(pthread_mutexattr_settype, (&attributes, PTHREAD_MUTEX_RECURSIVE));
+  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, &attributes));
+  CHECK_MUTEX_CALL(pthread_mutexattr_destroy, (&attributes));
 }
 
 Mutex::~Mutex() {
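
Editor's note: the new Mutex constructor drops the NDEBUG/VERIFY_OBJECT_ENABLED special cases and always requests PTHREAD_MUTEX_RECURSIVE, matching Java's reentrant monitor semantics. A minimal standalone sketch, outside ART and with a local stand-in for CHECK_MUTEX_CALL, of what the recursive type changes in practice:

    #include <pthread.h>
    #include <cstdio>
    #include <cstdlib>

    // Minimal stand-in for ART's CHECK_MUTEX_CALL: abort on any pthread error.
    #define CHECK_PTHREAD(call)                                                    \
      do {                                                                         \
        int rc = (call);                                                           \
        if (rc != 0) { fprintf(stderr, "%s failed: %d\n", #call, rc); abort(); }   \
      } while (0)

    int main() {
      pthread_mutexattr_t attributes;
      CHECK_PTHREAD(pthread_mutexattr_init(&attributes));
      CHECK_PTHREAD(pthread_mutexattr_settype(&attributes, PTHREAD_MUTEX_RECURSIVE));
      pthread_mutex_t mutex;
      CHECK_PTHREAD(pthread_mutex_init(&mutex, &attributes));
      CHECK_PTHREAD(pthread_mutexattr_destroy(&attributes));

      // With PTHREAD_MUTEX_RECURSIVE the owning thread may re-enter the lock,
      // provided it unlocks the same number of times; with ERRORCHECK the
      // second lock below would instead return EDEADLK.
      CHECK_PTHREAD(pthread_mutex_lock(&mutex));
      CHECK_PTHREAD(pthread_mutex_lock(&mutex));
      CHECK_PTHREAD(pthread_mutex_unlock(&mutex));
      CHECK_PTHREAD(pthread_mutex_unlock(&mutex));
      CHECK_PTHREAD(pthread_mutex_destroy(&mutex));
      return 0;
    }
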
diff --git a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index f423e38..1cda892 100644
--- a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -74,7 +74,7 @@
  * NULL on failure, e.g. if the threadId couldn't be found.
  */
 static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
-  ThreadListLock thread_list_lock;
+  ScopedThreadListLock thread_list_lock;
   Thread* thread = FindThreadByThinLockId(static_cast<uint32_t>(thin_lock_id));
   if (thread == NULL) {
     return NULL;
@@ -132,7 +132,7 @@
 static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
   std::vector<uint8_t> bytes;
   {
-    ThreadListLock thread_list_lock;
+    ScopedThreadListLock thread_list_lock;
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
 
     uint16_t thread_count;
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index f25a9ba..d0cfe95 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -94,12 +94,6 @@
 void SignalCatcher::HandleSigQuit() {
   Runtime* runtime = Runtime::Current();
   ThreadList* thread_list = runtime->GetThreadList();
-  ClassLinker* class_linker = runtime->GetClassLinker();
-
-  LOG(INFO) << "Heap lock owner tid: " << Heap::GetLockOwner() << "\n"
-            << "ThreadList lock owner tid: " << thread_list->GetLockOwner() << "\n"
-            << "ClassLinker classes lock owner tid: " << class_linker->GetClassesLockOwner() << "\n"
-            << "ClassLinker dex lock owner tid: " << class_linker->GetDexLockOwner() << "\n";
 
   thread_list->SuspendAll();
 
@@ -134,8 +128,8 @@
   Heap::CollectGarbage();
 }
 
-int WaitForSignal(Thread* thread, sigset_t& mask) {
-  ScopedThreadStateChange tsc(thread, Thread::kVmWait);
+int SignalCatcher::WaitForSignal(sigset_t& mask) {
+  ScopedThreadStateChange tsc(thread_, Thread::kVmWait);
 
   // Signals for sigwait() must be blocked but not ignored.  We
   // block signals like SIGQUIT for all threads, so the condition
@@ -149,6 +143,25 @@
     PLOG(FATAL) << "sigwait failed";
   }
 
+  if (!ShouldHalt()) {
+    // Let the user know we got the signal, just in case the system's too screwed for us to
+    // actually do what they want us to do...
+    LOG(INFO) << *thread_ << ": reacting to signal " << signal_number;
+
+    // If anyone's holding locks (and so we might fail to get back into state Runnable), say so...
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    pid_t heap_lock_owner = Heap::GetLockOwner();
+    pid_t thread_list_lock_owner = Runtime::Current()->GetThreadList()->GetLockOwner();
+    pid_t classes_lock_owner = class_linker->GetClassesLockOwner();
+    pid_t dex_lock_owner = class_linker->GetDexLockOwner();
+    if ((heap_lock_owner | thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) {
+      LOG(INFO) << "Heap lock owner tid: " << heap_lock_owner << "\n"
+                << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n"
+                << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n"
+                << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n";
+    }
+  }
+
   return signal_number;
 }
 
@@ -173,13 +186,12 @@
   sigaddset(&mask, SIGUSR1);
 
   while (true) {
-    int signal_number = WaitForSignal(signal_catcher->thread_, mask);
+    int signal_number = signal_catcher->WaitForSignal(mask);
     if (signal_catcher->ShouldHalt()) {
       runtime->DetachCurrentThread();
       return NULL;
     }
 
-    LOG(INFO) << *signal_catcher->thread_ << ": reacting to signal " << signal_number;
     switch (signal_number) {
     case SIGQUIT:
       signal_catcher->HandleSigQuit();
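
Editor's note: moving WaitForSignal() onto SignalCatcher gives it access to thread_ and lets it log the signal (and any held lock owners) before the handler runs, replacing the unconditional lock-owner dump that HandleSigQuit() used to do. For readers unfamiliar with the pattern, here is a self-contained sketch of the dedicated signal-catcher thread it relies on: the interesting signals are blocked in every thread, and a single thread dequeues them with sigwait().

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>
    #include <cstdio>

    static void* SignalCatcherLoop(void*) {
      sigset_t mask;
      sigemptyset(&mask);
      sigaddset(&mask, SIGQUIT);
      sigaddset(&mask, SIGUSR1);
      while (true) {
        int signal_number = 0;
        int rc = sigwait(&mask, &signal_number);
        if (rc != 0) {
          fprintf(stderr, "sigwait failed: %d\n", rc);
          return NULL;
        }
        // React here: e.g. dump all stacks on SIGQUIT, force a GC on SIGUSR1.
        fprintf(stderr, "caught signal %d\n", signal_number);
      }
    }

    int main() {
      // The signals must be blocked (not ignored) in every thread so that the
      // catcher's sigwait() is the only consumer; threads created after this
      // point inherit the mask.
      sigset_t mask;
      sigemptyset(&mask);
      sigaddset(&mask, SIGQUIT);
      sigaddset(&mask, SIGUSR1);
      pthread_sigmask(SIG_BLOCK, &mask, NULL);

      pthread_t catcher;
      pthread_create(&catcher, NULL, SignalCatcherLoop, NULL);

      kill(getpid(), SIGQUIT);  // process-directed, so the catcher's sigwait() receives it
      sleep(1);                 // give the catcher a moment to report before exiting
      return 0;
    }
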
diff --git a/src/signal_catcher.h b/src/signal_catcher.h
index 5178b1a..89132c9 100644
--- a/src/signal_catcher.h
+++ b/src/signal_catcher.h
@@ -43,6 +43,7 @@
   void Output(const std::string& s);
   void SetHaltFlag(bool new_value);
   bool ShouldHalt();
+  int WaitForSignal(sigset_t& mask);
 
   std::string stack_trace_file_;
   mutable Mutex lock_;
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 6da5e68..4b33908 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -22,33 +22,43 @@
 
 namespace art {
 
-// TODO: merge with ThreadListLock?
-class ThreadListLocker {
- public:
+ScopedThreadListLock::ScopedThreadListLock() {
+  // Self may be null during shutdown.
+  Thread* self = Thread::Current();
 
-  explicit ThreadListLocker(const ThreadList* thread_list) : thread_list_(thread_list) {
-    // Avoid deadlock between two threads trying to SuspendAll
-    // simultaneously by going to kVmWait if the lock cannot be
-    // immediately acquired.
-    if (!thread_list_->thread_list_lock_.TryLock()) {
-      Thread* self = Thread::Current();
-      if (self == NULL) {
-          thread_list_->thread_list_lock_.Lock();
-      } else {
-          ScopedThreadStateChange tsc(self, Thread::kVmWait);
-          thread_list_->thread_list_lock_.Lock();
-      }
+  // We insist that anyone taking the thread list lock already has the heap lock,
+  // because pretty much any time someone takes the thread list lock, they may
+  // end up needing the heap lock (even removing a thread from the thread list calls
+  // back into managed code to remove the thread from its ThreadGroup, and that allocates
+  // an iterator).
+  // TODO: this makes the distinction between the two locks pretty pointless.
+  if (self != NULL) {
+    Heap::Lock();
+  }
+
+  // Avoid deadlock between two threads trying to SuspendAll
+  // simultaneously by going to kVmWait if the lock cannot be
+  // immediately acquired.
+  // TODO: is this needed if we took the heap lock? taking the heap lock will have done this,
+  // and the other thread will now be in kVmWait waiting for the heap lock.
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
+  if (!thread_list->thread_list_lock_.TryLock()) {
+    if (self == NULL) {
+      thread_list->thread_list_lock_.Lock();
+    } else {
+      ScopedThreadStateChange tsc(self, Thread::kVmWait);
+      thread_list->thread_list_lock_.Lock();
     }
   }
 
-  ~ThreadListLocker() {
-    thread_list_->thread_list_lock_.Unlock();
+  if (self != NULL) {
+    Heap::Unlock();
   }
+}
 
- private:
-  const ThreadList* thread_list_;
-  DISALLOW_COPY_AND_ASSIGN(ThreadListLocker);
-};
+ScopedThreadListLock::~ScopedThreadListLock() {
+  Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock();
+}
 
 ThreadList::ThreadList(bool verbose)
     : verbose_(verbose),
@@ -78,7 +88,7 @@
 }
 
 void ThreadList::Dump(std::ostream& os) {
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   os << "DALVIK THREADS (" << list_.size() << "):\n";
   for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
     (*it)->Dump(os);
@@ -140,7 +150,7 @@
   }
 
   CHECK_EQ(self->GetState(), Thread::kRunnable);
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   Thread* debug_thread = Dbg::GetDebugThread();
 
   {
@@ -225,7 +235,7 @@
   // Collisions with other suspends aren't really interesting. We want
   // to ensure that we're the only one fiddling with the suspend count
   // though.
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   MutexLock mu(thread_suspend_count_lock_);
   ModifySuspendCount(self, +1, true);
 
@@ -270,7 +280,7 @@
   // writes, since nobody should be moving until we decrement the count.
   // We do need to hold the thread list because of JNI attaches.
   {
-    ThreadListLocker locker(this);
+    ScopedThreadListLock thread_list_lock;
     Thread* debug_thread = Dbg::GetDebugThread();
     MutexLock mu(thread_suspend_count_lock_);
     for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
@@ -346,7 +356,7 @@
   }
 
   {
-    ThreadListLocker locker(this);
+    ScopedThreadListLock thread_list_lock;
     MutexLock mu(thread_suspend_count_lock_);
     for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
       Thread* thread = *it;
@@ -374,7 +384,7 @@
     LOG(INFO) << "ThreadList::Register() " << *self << "\n" << Dumpable<Thread>(*self);
   }
 
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   CHECK(!Contains(self));
   list_.push_back(self);
 }
@@ -399,7 +409,7 @@
       self->RemoveFromThreadGroup();
   }
 
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
 
   // Remove this thread from the list.
   CHECK(Contains(self));
@@ -426,7 +436,7 @@
 }
 
 void ThreadList::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
     (*it)->VisitRoots(visitor, arg);
   }
@@ -448,7 +458,7 @@
   CHECK(child != self);
 
   {
-    ThreadListLocker locker(this);
+    ScopedThreadListLock thread_list_lock;
     if (verbose_) {
       LOG(INFO) << *self << " waiting for child " << *child << " to be in thread list...";
     }
@@ -464,7 +474,7 @@
   self->SetState(Thread::kRunnable);
 
   // Tell the child that it's safe: it will see any future suspend request.
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   if (verbose_) {
     LOG(INFO) << *self << " telling child " << *child << " it's safe to proceed...";
   }
@@ -477,7 +487,7 @@
   DCHECK(Contains(self));
 
   {
-    ThreadListLocker locker(this);
+    ScopedThreadListLock thread_list_lock;
 
     // Tell our parent that we're in the thread list.
     if (verbose_) {
@@ -521,14 +531,14 @@
 }
 
 void ThreadList::WaitForNonDaemonThreadsToExit() {
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   while (!AllThreadsAreDaemons()) {
     thread_exit_cond_.Wait(thread_list_lock_);
   }
 }
 
 void ThreadList::SuspendAllDaemonThreads() {
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
 
   // Tell all the daemons it's time to suspend. (At this point, we know
   // all threads are daemons.)
@@ -562,7 +572,7 @@
 }
 
 uint32_t ThreadList::AllocThreadId() {
-  ThreadListLocker locker(this);
+  ScopedThreadListLock thread_list_lock;
   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
     if (!allocated_ids_[i]) {
       allocated_ids_.set(i);
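
Editor's note: the rewritten constructor encodes an ordering rule for callers that have a Thread: briefly take the heap lock, then the thread list lock, then drop the heap lock, with the destructor releasing the thread list lock. Below is a minimal sketch of that acquisition pattern using std::recursive_mutex stand-ins; the names are illustrative, and it deliberately omits ART's kVmWait state change and the null-Thread shutdown path.

    #include <mutex>
    #include <ostream>

    // Illustrative stand-ins for ART's heap lock and thread list lock.
    static std::recursive_mutex g_heap_lock;
    static std::recursive_mutex g_thread_list_lock;

    // RAII guard mirroring the ScopedThreadListLock constructor above: take the
    // outer (heap) lock, then the thread list lock, then release the heap lock,
    // which was only held to serialize acquisition. As long as everyone takes
    // the two locks in this order, the classic two-lock deadlock cannot occur.
    class ScopedThreadListLockSketch {
     public:
      ScopedThreadListLockSketch() {
        g_heap_lock.lock();
        g_thread_list_lock.lock();
        g_heap_lock.unlock();
      }
      ~ScopedThreadListLockSketch() {
        g_thread_list_lock.unlock();
      }
      ScopedThreadListLockSketch(const ScopedThreadListLockSketch&) = delete;
      ScopedThreadListLockSketch& operator=(const ScopedThreadListLockSketch&) = delete;
    };

    void DumpAllThreadsSketch(std::ostream& os) {
      ScopedThreadListLockSketch lock;  // released automatically at end of scope
      os << "DALVIK THREADS ...\n";
    }
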
diff --git a/src/thread_list.h b/src/thread_list.h
index 7bbd866..57f9cf4 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -83,36 +83,18 @@
   ConditionVariable thread_suspend_count_cond_;
 
   friend class Thread;
-  friend class ThreadListLock;
-  friend class ThreadListLocker;
+  friend class ScopedThreadListLock;
 
   DISALLOW_COPY_AND_ASSIGN(ThreadList);
 };
 
-class ThreadListLock {
+class ScopedThreadListLock {
  public:
-  explicit ThreadListLock(Thread* self = NULL) {
-    if (self == NULL) {
-      // Try to get it from TLS.
-      self = Thread::Current();
-    }
-    // self may still be NULL during VM shutdown.
-    Thread::State old_state;
-    if (self != NULL) {
-      old_state = self->SetState(Thread::kVmWait);
-    }
-    Runtime::Current()->GetThreadList()->thread_list_lock_.Lock();
-    if (self != NULL) {
-      self->SetState(old_state);
-    }
-  }
-
-  ~ThreadListLock() {
-    Runtime::Current()->GetThreadList()->thread_list_lock_.Unlock();
-  }
+  ScopedThreadListLock();
+  ~ScopedThreadListLock();
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ThreadListLock);
+  DISALLOW_COPY_AND_ASSIGN(ScopedThreadListLock);
 };
 
 }  // namespace art
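
Editor's note: for reference, a hypothetical caller of the new class (not part of this change), assuming ART's thread.h/thread_list.h headers are included. The parameterless guard finds the thread list via Runtime::Current() and releases the lock on every path out of the scope, which is what lets the call sites rewritten above drop their explicit locker objects.

    #include <ostream>

    // Hypothetical helper showing the RAII shape of ScopedThreadListLock; the
    // ART calls used here (Thread::FromManagedThread, Thread::Dump) appear
    // elsewhere in this change.
    void DumpThreadIfPresent(JNIEnv* env, jobject java_thread, std::ostream& os) {
      ScopedThreadListLock thread_list_lock;  // lock taken here
      Thread* thread = Thread::FromManagedThread(env, java_thread);
      if (thread == NULL) {
        return;                               // lock released here too
      }
      thread->Dump(os);                       // safe: the list cannot change under us
    }                                         // lock released here
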