Use static thread safety analysis when available, and fix the bugs GCC finds.

It isn't possible to express the Heap locking or the ThreadList locking with
GCC's annotations, but Clang's are supposed to be able to. This patch does
what's possible for now.
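
As a rough illustration (not part of this change), this is how the annotations
defined in src/macros.h are intended to be used with the existing Mutex and
MutexLock classes; the Counter class below is hypothetical:

    // Hypothetical example class, shown only to illustrate the annotations.
    class Counter {
     public:
      Counter() : lock_("counter lock"), value_(0) {}

      void Increment() {
        MutexLock mu(lock_);  // Scoped acquire/release of lock_.
        IncrementLocked();    // OK: lock_ is held here.
      }

     private:
      // Callers must already hold lock_; GCC warns at call sites that don't.
      void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
        ++value_;
      }

      mutable Mutex lock_;
      int value_ GUARDED_BY(lock_);  // Accesses require lock_ to be held.
    };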

Change-Id: Ib64a890c9d27c6ce255d5003cb755c2ef1beba95
diff --git a/src/class_linker.cc b/src/class_linker.cc
index a692e2c..47beadb 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -658,6 +658,7 @@
 }
 
 const OatFile* ClassLinker::FindOpenedOatFileFromDexLocation(const std::string& dex_location) {
+  MutexLock mu(dex_lock_);
   for (size_t i = 0; i < oat_files_.size(); i++) {
     const OatFile* oat_file = oat_files_[i];
     DCHECK(oat_file != NULL);
@@ -807,6 +808,7 @@
 }
 
 const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
+  MutexLock mu(dex_lock_);
   for (size_t i = 0; i < oat_files_.size(); i++) {
     const OatFile* oat_file = oat_files_[i];
     DCHECK(oat_file != NULL);
@@ -928,8 +930,11 @@
 void ClassLinker::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
   visitor(class_roots_, arg);
 
-  for (size_t i = 0; i < dex_caches_.size(); i++) {
-    visitor(dex_caches_[i], arg);
+  {
+    MutexLock mu(dex_lock_);
+    for (size_t i = 0; i < dex_caches_.size(); i++) {
+      visitor(dex_caches_[i], arg);
+    }
   }
 
   {
@@ -1831,11 +1836,11 @@
   size_t hash = StringPieceHash()(descriptor);
   MutexLock mu(classes_lock_);
   Table& classes = image_class ? image_classes_ : classes_;
-  Class* existing = LookupClass(descriptor.data(), klass->GetClassLoader(), hash, classes);
+  Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes);
 #ifndef NDEBUG
   // Check we don't have the class in the other table in error
   Table& other_classes = image_class ? classes_ : image_classes_;
-  CHECK(LookupClass(descriptor.data(), klass->GetClassLoader(), hash, other_classes) == NULL);
+  CHECK(LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, other_classes) == NULL);
 #endif
   if (existing != NULL) {
     return existing;
@@ -1873,15 +1878,15 @@
   size_t hash = Hash(descriptor);
   MutexLock mu(classes_lock_);
   // TODO: determine if its better to search classes_ or image_classes_ first
-  Class* klass = LookupClass(descriptor, class_loader, hash, classes_);
+  Class* klass = LookupClassLocked(descriptor, class_loader, hash, classes_);
   if (klass != NULL) {
     return klass;
   }
-  return LookupClass(descriptor, class_loader, hash, image_classes_);
+  return LookupClassLocked(descriptor, class_loader, hash, image_classes_);
 }
 
-Class* ClassLinker::LookupClass(const char* descriptor, const ClassLoader* class_loader,
-                                size_t hash, const Table& classes) {
+Class* ClassLinker::LookupClassLocked(const char* descriptor, const ClassLoader* class_loader,
+                                      size_t hash, const Table& classes) {
   ClassHelper kh(NULL, this);
   typedef Table::const_iterator It;  // TODO: C++0x auto
   for (It it = classes.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
@@ -3513,6 +3518,7 @@
 }
 
 void ClassLinker::RelocateExecutable() {
+  MutexLock mu(dex_lock_);
   for (size_t i = 0; i < oat_files_.size(); ++i) {
     const_cast<OatFile*>(oat_files_[i])->RelocateExecutable();
   }
diff --git a/src/class_linker.h b/src/class_linker.h
index 1925a9d..293e3ab 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -355,9 +355,9 @@
   // the same descriptor and ClassLoader.
   Class* InsertClass(const StringPiece& descriptor, Class* klass, bool image_class);
 
-  void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<DexCache>& dex_cache);
-  bool IsDexFileRegisteredLocked(const DexFile& dex_file) const;
-  void RegisterOatFileLocked(const OatFile& oat_file);
+  void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<DexCache>& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
+  bool IsDexFileRegisteredLocked(const DexFile& dex_file) const EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
+  void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
 
   bool InitializeClass(Class* klass, bool can_run_clinit, bool can_init_statics);
   bool WaitForInitializeClass(Class* klass, Thread* self, ObjectLock& lock);
@@ -410,23 +410,22 @@
 
   std::vector<const DexFile*> boot_class_path_;
 
-  std::vector<const DexFile*> dex_files_;
-  std::vector<DexCache*> dex_caches_;
-  std::vector<const OatFile*> oat_files_;
-  // lock to protect concurrent access to dex_files_, dex_caches_, and oat_files_
   mutable Mutex dex_lock_;
+  std::vector<const DexFile*> dex_files_ GUARDED_BY(dex_lock_);
+  std::vector<DexCache*> dex_caches_ GUARDED_BY(dex_lock_);
+  std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
 
 
   // multimap from a string hash code of a class descriptor to
   // Class* instances. Results should be compared for a matching
   // Class::descriptor_ and Class::class_loader_.
-  // Protected by classes_lock_
-  typedef std::multimap<size_t, Class*> Table;
-  Class* LookupClass(const char* descriptor, const ClassLoader* class_loader,
-                     size_t hash, const Table& classes);
-  Table image_classes_;
-  Table classes_;
   mutable Mutex classes_lock_;
+  typedef std::multimap<size_t, Class*> Table;
+  Table image_classes_ GUARDED_BY(classes_lock_);
+  Table classes_ GUARDED_BY(classes_lock_);
+
+  Class* LookupClassLocked(const char* descriptor, const ClassLoader* class_loader,
+                           size_t hash, const Table& classes) EXCLUSIVE_LOCKS_REQUIRED(classes_lock_);
 
   // indexes into class_roots_.
   // needs to be kept in sync with class_roots_descriptors_.
diff --git a/src/compiler.h b/src/compiler.h
index 74d3205..5a46de6 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -299,23 +299,23 @@
   typedef SafeMap<const ClassReference, CompiledClass*> ClassTable;
   // All class references that this compiler has compiled
   mutable Mutex compiled_classes_lock_;
-  ClassTable compiled_classes_;
+  ClassTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
 
   typedef SafeMap<const MethodReference, CompiledMethod*> MethodTable;
   // All method references that this compiler has compiled
   mutable Mutex compiled_methods_lock_;
-  MethodTable compiled_methods_;
+  MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_);
 
   typedef SafeMap<std::string, const CompiledInvokeStub*> InvokeStubTable;
   // Invocation stubs created to allow invocation of the compiled methods
   mutable Mutex compiled_invoke_stubs_lock_;
-  InvokeStubTable compiled_invoke_stubs_;
+  InvokeStubTable compiled_invoke_stubs_ GUARDED_BY(compiled_invoke_stubs_lock_);
 
 #if defined(ART_USE_LLVM_COMPILER)
   typedef SafeMap<std::string, const CompiledInvokeStub*> ProxyStubTable;
   // Proxy stubs created for proxy invocation delegation
   mutable Mutex compiled_proxy_stubs_lock_;
-  ProxyStubTable compiled_proxy_stubs_;
+  ProxyStubTable compiled_proxy_stubs_ GUARDED_BY(compiled_proxy_stubs_lock_);
 #endif
 
   bool image_;
diff --git a/src/debugger.cc b/src/debugger.cc
index 8549ae0..eb5efa2 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -171,14 +171,14 @@
 
 // Recent allocation tracking.
 static Mutex gAllocTrackerLock("AllocTracker lock");
-AllocRecord* Dbg::recent_allocation_records_ = NULL; // TODO: CircularBuffer<AllocRecord>
-static size_t gAllocRecordHead = 0;
-static size_t gAllocRecordCount = 0;
+AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL; // TODO: CircularBuffer<AllocRecord>
+static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0;
+static size_t gAllocRecordCount GUARDED_BY(gAllocTrackerLock) = 0;
 
 // Breakpoints and single-stepping.
 static Mutex gBreakpointsLock("breakpoints lock");
-static std::vector<Breakpoint> gBreakpoints;
-static SingleStepControl gSingleStepControl;
+static std::vector<Breakpoint> gBreakpoints GUARDED_BY(gBreakpointsLock);
+static SingleStepControl gSingleStepControl GUARDED_BY(gBreakpointsLock);
 
 static bool IsBreakpoint(Method* m, uint32_t dex_pc) {
   MutexLock mu(gBreakpointsLock);
@@ -479,9 +479,7 @@
 }
 
 static void SetDebuggerUpdatesEnabled(bool enabled) {
-  Runtime* runtime = Runtime::Current();
-  ScopedThreadListLock thread_list_lock;
-  runtime->GetThreadList()->ForEach(SetDebuggerUpdatesEnabledCallback, &enabled);
+  Runtime::Current()->GetThreadList()->ForEach(SetDebuggerUpdatesEnabledCallback, &enabled);
 }
 
 void Dbg::GoActive() {
@@ -1443,10 +1441,7 @@
   ThreadListVisitor tlv;
   tlv.thread_group = thread_group;
 
-  {
-    ScopedThreadListLock thread_list_lock;
-    Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
-  }
+  Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
 
   *pThreadCount = tlv.threads.size();
   if (*pThreadCount == 0) {
@@ -1819,6 +1814,7 @@
 
   // If the debugger is single-stepping one of our threads, check to
   // see if we're that thread and we've reached a step point.
+  MutexLock mu(gBreakpointsLock);
   if (gSingleStepControl.is_active && gSingleStepControl.thread == self) {
     CHECK(!m->IsNative());
     if (gSingleStepControl.step_depth == JDWP::SD_INTO) {
@@ -1926,6 +1922,8 @@
     return JDWP::ERR_INVALID_THREAD;
   }
 
+  MutexLock mu(gBreakpointsLock);
+
   // TODO: there's no theoretical reason why we couldn't support single-stepping
   // of multiple threads at once, but we never did so historically.
   if (gSingleStepControl.thread != NULL && thread != gSingleStepControl.thread) {
@@ -1940,10 +1938,12 @@
 
   struct SingleStepStackVisitor : public Thread::StackVisitor {
     SingleStepStackVisitor() {
+      MutexLock mu(gBreakpointsLock); // Keep GCC happy.
       gSingleStepControl.method = NULL;
       gSingleStepControl.stack_depth = 0;
     }
     bool VisitFrame(const Frame& f, uintptr_t pc) {
+      MutexLock mu(gBreakpointsLock); // Keep GCC happy.
       if (f.HasMethod()) {
         ++gSingleStepControl.stack_depth;
         if (gSingleStepControl.method == NULL) {
@@ -1974,6 +1974,7 @@
     }
 
     static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
+      MutexLock mu(gBreakpointsLock); // Keep GCC happy.
       DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
       if (static_cast<int32_t>(line_number) == gSingleStepControl.line_number) {
         if (!context->last_pc_valid) {
@@ -1994,6 +1995,7 @@
     }
 
     ~DebugCallbackContext() {
+      MutexLock mu(gBreakpointsLock); // Keep GCC happy.
       // If the line number was the last in the position table...
       if (last_pc_valid) {
         size_t end = MethodHelper(gSingleStepControl.method).GetCodeItem()->insns_size_in_code_units_;
@@ -2043,6 +2045,8 @@
 }
 
 void Dbg::UnconfigureStep(JDWP::ObjectId /*threadId*/) {
+  MutexLock mu(gBreakpointsLock);
+
   gSingleStepControl.is_active = false;
   gSingleStepControl.thread = NULL;
   gSingleStepControl.dex_pcs.clear();
@@ -2890,7 +2894,7 @@
 //
 // We need to handle underflow in our circular buffer, so we add
 // kNumAllocRecords and then mask it back down.
-static inline int HeadIndex() {
+static inline int HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(gAllocTrackerLock) {
   return (gAllocRecordHead+1 + kNumAllocRecords - gAllocRecordCount) & (kNumAllocRecords-1);
 }
 
diff --git a/src/dex_file.h b/src/dex_file.h
index 29ce087..9fcdeb0 100644
--- a/src/dex_file.h
+++ b/src/dex_file.h
@@ -866,7 +866,7 @@
 
   // A cached com.android.dex.Dex instance, possibly NULL. Use GetDexObject.
   mutable Mutex dex_object_lock_;
-  mutable jobject dex_object_;
+  mutable jobject dex_object_ GUARDED_BY(dex_object_lock_);
 
   // Points to the header section.
   const Header* header_;
diff --git a/src/heap.h b/src/heap.h
index e205f7a..96a9ee6 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -42,7 +42,7 @@
 class SpaceTest;
 class Thread;
 
-class Heap {
+class LOCKABLE Heap {
   friend class ScopedHeapLock;
  public:
   static const size_t kInitialSize = 2 * MB;
@@ -236,13 +236,14 @@
   void DumpForSigQuit(std::ostream& os);
 
   void Trim();
+
  private:
   // Allocates uninitialized storage.
   Object* AllocateLocked(size_t num_bytes);
   Object* AllocateLocked(AllocSpace* space, size_t num_bytes);
 
-  void Lock();
-  void Unlock();
+  void Lock() EXCLUSIVE_LOCK_FUNCTION();
+  void Unlock() UNLOCK_FUNCTION();
 
   // Pushes a list of cleared references out to the managed heap.
   void EnqueueClearedReferences(Object** cleared_references);
@@ -253,7 +254,8 @@
   void RecordAllocationLocked(AllocSpace* space, const Object* object);
   void RecordImageAllocations(Space* space);
 
-  void CollectGarbageInternal(bool concurrent, bool clear_soft_references);
+  // TODO: can we teach GCC to understand the weird locking in here?
+  void CollectGarbageInternal(bool concurrent, bool clear_soft_references) NO_THREAD_SAFETY_ANALYSIS;
 
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
diff --git a/src/intern_table.h b/src/intern_table.h
index 58d87d0..04c75d0 100644
--- a/src/intern_table.h
+++ b/src/intern_table.h
@@ -76,9 +76,9 @@
   void Remove(Table& table, const String* s, uint32_t hash_code);
 
   mutable Mutex intern_table_lock_;
-  Table image_strong_interns_;
-  Table strong_interns_;
-  Table weak_interns_;
+  Table image_strong_interns_ GUARDED_BY(intern_table_lock_);
+  Table strong_interns_ GUARDED_BY(intern_table_lock_);
+  Table weak_interns_ GUARDED_BY(intern_table_lock_);
 };
 
 }  // namespace art
diff --git a/src/jdwp/jdwp.h b/src/jdwp/jdwp.h
index 7d8bf22..4e9b0a0 100644
--- a/src/jdwp/jdwp.h
+++ b/src/jdwp/jdwp.h
@@ -118,7 +118,7 @@
    *
    * Returns a newly-allocated JdwpState struct on success, or NULL on failure.
    */
-  static JdwpState* Create(const JdwpOptions* options);
+  static JdwpState* Create(const JdwpOptions* options) NO_THREAD_SAFETY_ANALYSIS; // TODO: make GCC understand.
 
   ~JdwpState();
 
@@ -264,11 +264,15 @@
   explicit JdwpState(const JdwpOptions* options);
   bool InvokeInProgress();
   bool IsConnected();
-  void SuspendByPolicy(JdwpSuspendPolicy suspendPolicy);
-  void CleanupMatchList(JdwpEvent** matchList, int matchCount);
+  void SuspendByPolicy(JdwpSuspendPolicy suspend_policy);
+  void CleanupMatchList(JdwpEvent** match_list,
+                        int match_count) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_);
   void EventFinish(ExpandBuf* pReq);
-  void FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** matchList, int* pMatchCount);
-  void UnregisterEvent(JdwpEvent* pEvent);
+  void FindMatchingEvents(JdwpEventKind eventKind,
+                          ModBasket* basket,
+                          JdwpEvent** match_list,
+                          int* pMatchCount) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_);
+  void UnregisterEvent(JdwpEvent* pEvent) EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_);
 
 public: // TODO: fix privacy
   const JdwpOptions* options_;
@@ -299,17 +303,17 @@
   int64_t lastActivityWhen;
 
   /* global counters and a mutex to protect them */
-  uint32_t requestSerial;
-  uint32_t eventSerial;
   Mutex serial_lock_;
+  uint32_t request_serial_ GUARDED_BY(serial_lock_);
+  uint32_t event_serial_ GUARDED_BY(serial_lock_);
 
   /*
    * Events requested by the debugger (breakpoints, class prep, etc).
    */
 public: // TODO: fix privacy
-  int numEvents;      /* #of elements in eventList */
-  JdwpEvent* eventList;      /* linked list of events */
-  Mutex event_lock_;      /* guards numEvents/eventList */
+  Mutex event_list_lock_;
+  JdwpEvent* event_list_ GUARDED_BY(event_list_lock_); // Linked list of events.
+  int event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
 private:
 
   /*
diff --git a/src/jdwp/jdwp_event.cc b/src/jdwp/jdwp_event.cc
index 6476c3c..a92590d 100644
--- a/src/jdwp/jdwp_event.cc
+++ b/src/jdwp/jdwp_event.cc
@@ -124,7 +124,7 @@
  */
 static void dumpEvent(const JdwpEvent* pEvent) {
   LOG(INFO) << StringPrintf("Event id=0x%4x %p (prev=%p next=%p):", pEvent->requestId, pEvent, pEvent->prev, pEvent->next);
-  LOG(INFO) << "  kind=" << pEvent->eventKind << " susp=" << pEvent->suspendPolicy << " modCount=" << pEvent->modCount;
+  LOG(INFO) << "  kind=" << pEvent->eventKind << " susp=" << pEvent->suspend_policy << " modCount=" << pEvent->modCount;
 
   for (int i = 0; i < pEvent->modCount; i++) {
     const JdwpEventMod* pMod = &pEvent->mods[i];
@@ -141,7 +141,7 @@
  * not be added to the list, and an appropriate error will be returned.
  */
 JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
-  MutexLock mu(event_lock_);
+  MutexLock mu(event_list_lock_);
 
   CHECK(pEvent != NULL);
   CHECK(pEvent->prev == NULL);
@@ -173,12 +173,12 @@
   /*
    * Add to list.
    */
-  if (eventList != NULL) {
-    pEvent->next = eventList;
-    eventList->prev = pEvent;
+  if (event_list_ != NULL) {
+    pEvent->next = event_list_;
+    event_list_->prev = pEvent;
   }
-  eventList = pEvent;
-  numEvents++;
+  event_list_ = pEvent;
+  ++event_list_size_;
 
   return ERR_NONE;
 }
@@ -194,9 +194,9 @@
 void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
   if (pEvent->prev == NULL) {
     /* head of the list */
-    CHECK(eventList == pEvent);
+    CHECK(event_list_ == pEvent);
 
-    eventList = pEvent->next;
+    event_list_ = pEvent->next;
   } else {
     pEvent->prev->next = pEvent->next;
   }
@@ -222,8 +222,8 @@
     }
   }
 
-  numEvents--;
-  CHECK(numEvents != 0 || eventList == NULL);
+  --event_list_size_;
+  CHECK(event_list_size_ != 0 || event_list_ == NULL);
 }
 
 /*
@@ -234,9 +234,9 @@
  * explicitly remove one-off single-step events.)
  */
 void JdwpState::UnregisterEventById(uint32_t requestId) {
-  MutexLock mu(event_lock_);
+  MutexLock mu(event_list_lock_);
 
-  JdwpEvent* pEvent = eventList;
+  JdwpEvent* pEvent = event_list_;
   while (pEvent != NULL) {
     if (pEvent->requestId == requestId) {
       UnregisterEvent(pEvent);
@@ -254,9 +254,9 @@
  * Remove all entries from the event list.
  */
 void JdwpState::UnregisterAll() {
-  MutexLock mu(event_lock_);
+  MutexLock mu(event_list_lock_);
 
-  JdwpEvent* pEvent = eventList;
+  JdwpEvent* pEvent = event_list_;
   while (pEvent != NULL) {
     JdwpEvent* pNextEvent = pEvent->next;
 
@@ -265,7 +265,7 @@
     pEvent = pNextEvent;
   }
 
-  eventList = NULL;
+  event_list_ = NULL;
 }
 
 /*
@@ -293,7 +293,7 @@
   /* make sure it was removed from the list */
   CHECK(pEvent->prev == NULL);
   CHECK(pEvent->next == NULL);
-  /* want to check state->eventList != pEvent */
+  /* want to check state->event_list_ != pEvent */
 
   /*
    * Free any hairy bits in the mods.
@@ -326,8 +326,8 @@
  * Run through the list and remove any entries with an expired "count" mod
  * from the event list, then free the match list.
  */
-void JdwpState::CleanupMatchList(JdwpEvent** matchList, int match_count) {
-  JdwpEvent** ppEvent = matchList;
+void JdwpState::CleanupMatchList(JdwpEvent** match_list, int match_count) {
+  JdwpEvent** ppEvent = match_list;
 
   while (match_count--) {
     JdwpEvent* pEvent = *ppEvent;
@@ -344,7 +344,7 @@
     ppEvent++;
   }
 
-  delete[] matchList;
+  delete[] match_list;
 }
 
 /*
@@ -446,20 +446,20 @@
  * Find all events of type "eventKind" with mods that match up with the
  * rest of the arguments.
  *
- * Found events are appended to "matchList", and "*pMatchCount" is advanced,
+ * Found events are appended to "match_list", and "*pMatchCount" is advanced,
  * so this may be called multiple times for grouped events.
  *
  * DO NOT call this multiple times for the same eventKind, as Count mods are
  * decremented during the scan.
  */
-void JdwpState::FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** matchList, int* pMatchCount) {
+void JdwpState::FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket, JdwpEvent** match_list, int* pMatchCount) {
   /* start after the existing entries */
-  matchList += *pMatchCount;
+  match_list += *pMatchCount;
 
-  JdwpEvent* pEvent = eventList;
+  JdwpEvent* pEvent = event_list_;
   while (pEvent != NULL) {
     if (pEvent->eventKind == eventKind && ModsMatch(pEvent, basket)) {
-      *matchList++ = pEvent;
+      *match_list++ = pEvent;
       (*pMatchCount)++;
     }
 
@@ -471,14 +471,14 @@
  * Scan through the list of matches and determine the most severe
  * suspension policy.
  */
-static JdwpSuspendPolicy scanSuspendPolicy(JdwpEvent** matchList, int match_count) {
+static JdwpSuspendPolicy scanSuspendPolicy(JdwpEvent** match_list, int match_count) {
   JdwpSuspendPolicy policy = SP_NONE;
 
   while (match_count--) {
-    if ((*matchList)->suspendPolicy > policy) {
-      policy = (*matchList)->suspendPolicy;
+    if ((*match_list)->suspend_policy > policy) {
+      policy = (*match_list)->suspend_policy;
     }
-    matchList++;
+    match_list++;
   }
 
   return policy;
@@ -490,16 +490,16 @@
  *  SP_EVENT_THREAD - suspend ourselves
  *  SP_ALL - suspend everybody except JDWP support thread
  */
-void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspendPolicy) {
-  VLOG(jdwp) << "SuspendByPolicy(" << suspendPolicy << ")";
-  if (suspendPolicy == SP_NONE) {
+void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy) {
+  VLOG(jdwp) << "SuspendByPolicy(" << suspend_policy << ")";
+  if (suspend_policy == SP_NONE) {
     return;
   }
 
-  if (suspendPolicy == SP_ALL) {
+  if (suspend_policy == SP_ALL) {
     Dbg::SuspendVM();
   } else {
-    CHECK_EQ(suspendPolicy, SP_EVENT_THREAD);
+    CHECK_EQ(suspend_policy, SP_EVENT_THREAD);
   }
 
   /* this is rare but possible -- see CLASS_PREPARE handling */
@@ -645,23 +645,23 @@
  * must be for the main thread.
  */
 bool JdwpState::PostVMStart() {
-  JdwpSuspendPolicy suspendPolicy;
+  JdwpSuspendPolicy suspend_policy;
   ObjectId threadId = Dbg::GetThreadSelfId();
 
   if (options_->suspend) {
-    suspendPolicy = SP_ALL;
+    suspend_policy = SP_ALL;
   } else {
-    suspendPolicy = SP_NONE;
+    suspend_policy = SP_NONE;
   }
 
   ExpandBuf* pReq = eventPrep();
   {
-    MutexLock mu(event_lock_); // probably don't need this here
+    MutexLock mu(event_list_lock_); // probably don't need this here
 
     VLOG(jdwp) << "EVENT: " << EK_VM_START;
-    VLOG(jdwp) << "  suspendPolicy=" << suspendPolicy;
+    VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
 
-    expandBufAdd1(pReq, suspendPolicy);
+    expandBufAdd1(pReq, suspend_policy);
     expandBufAdd4BE(pReq, 1);
 
     expandBufAdd1(pReq, EK_VM_START);
@@ -672,13 +672,13 @@
   /* send request and possibly suspend ourselves */
   if (pReq != NULL) {
     int old_state = Dbg::ThreadWaiting();
-    if (suspendPolicy != SP_NONE) {
+    if (suspend_policy != SP_NONE) {
       SetWaitForEventThread(threadId);
     }
 
     EventFinish(pReq);
 
-    SuspendByPolicy(suspendPolicy);
+    SuspendByPolicy(suspend_policy);
     Dbg::ThreadContinuing(old_state);
   }
 
@@ -741,61 +741,62 @@
     return false;
   }
 
-  JdwpEvent** matchList = AllocMatchList(numEvents);
+  JdwpEvent** match_list = NULL;
   int match_count = 0;
   ExpandBuf* pReq = NULL;
-  JdwpSuspendPolicy suspendPolicy = SP_NONE;
+  JdwpSuspendPolicy suspend_policy = SP_NONE;
 
   {
-    MutexLock mu(event_lock_);
+    MutexLock mu(event_list_lock_);
+    match_list = AllocMatchList(event_list_size_);
     if ((eventFlags & Dbg::kBreakpoint) != 0) {
-      FindMatchingEvents(EK_BREAKPOINT, &basket, matchList, &match_count);
+      FindMatchingEvents(EK_BREAKPOINT, &basket, match_list, &match_count);
     }
     if ((eventFlags & Dbg::kSingleStep) != 0) {
-      FindMatchingEvents(EK_SINGLE_STEP, &basket, matchList, &match_count);
+      FindMatchingEvents(EK_SINGLE_STEP, &basket, match_list, &match_count);
     }
     if ((eventFlags & Dbg::kMethodEntry) != 0) {
-      FindMatchingEvents(EK_METHOD_ENTRY, &basket, matchList, &match_count);
+      FindMatchingEvents(EK_METHOD_ENTRY, &basket, match_list, &match_count);
     }
     if ((eventFlags & Dbg::kMethodExit) != 0) {
-      FindMatchingEvents(EK_METHOD_EXIT, &basket, matchList, &match_count);
+      FindMatchingEvents(EK_METHOD_EXIT, &basket, match_list, &match_count);
 
       // TODO: match EK_METHOD_EXIT_WITH_RETURN_VALUE too; we need to include the 'value', though.
-      //FindMatchingEvents(EK_METHOD_EXIT_WITH_RETURN_VALUE, &basket, matchList, &match_count);
+      //FindMatchingEvents(EK_METHOD_EXIT_WITH_RETURN_VALUE, &basket, match_list, &match_count);
     }
     if (match_count != 0) {
-      VLOG(jdwp) << "EVENT: " << matchList[0]->eventKind << "(" << match_count << " total) "
+      VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
                  << basket.className << "." << Dbg::GetMethodName(pLoc->classId, pLoc->methodId)
                  << StringPrintf(" thread=%#llx dex_pc=%#llx)", basket.threadId, pLoc->dex_pc);
 
-      suspendPolicy = scanSuspendPolicy(matchList, match_count);
-      VLOG(jdwp) << "  suspendPolicy=" << suspendPolicy;
+      suspend_policy = scanSuspendPolicy(match_list, match_count);
+      VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
 
       pReq = eventPrep();
-      expandBufAdd1(pReq, suspendPolicy);
+      expandBufAdd1(pReq, suspend_policy);
       expandBufAdd4BE(pReq, match_count);
 
       for (int i = 0; i < match_count; i++) {
-        expandBufAdd1(pReq, matchList[i]->eventKind);
-        expandBufAdd4BE(pReq, matchList[i]->requestId);
+        expandBufAdd1(pReq, match_list[i]->eventKind);
+        expandBufAdd4BE(pReq, match_list[i]->requestId);
         expandBufAdd8BE(pReq, basket.threadId);
         AddLocation(pReq, pLoc);
       }
     }
 
-    CleanupMatchList(matchList, match_count);
+    CleanupMatchList(match_list, match_count);
   }
 
   /* send request and possibly suspend ourselves */
   if (pReq != NULL) {
     int old_state = Dbg::ThreadWaiting();
-    if (suspendPolicy != SP_NONE) {
+    if (suspend_policy != SP_NONE) {
       SetWaitForEventThread(basket.threadId);
     }
 
     EventFinish(pReq);
 
-    SuspendByPolicy(suspendPolicy);
+    SuspendByPolicy(suspend_policy);
     Dbg::ThreadContinuing(old_state);
   }
 
@@ -824,49 +825,49 @@
   basket.threadId = threadId;
 
   ExpandBuf* pReq = NULL;
-  JdwpSuspendPolicy suspendPolicy = SP_NONE;
+  JdwpSuspendPolicy suspend_policy = SP_NONE;
   int match_count = 0;
   {
     // Don't allow the list to be updated while we scan it.
-    MutexLock mu(event_lock_);
-    JdwpEvent** matchList = AllocMatchList(numEvents);
+    MutexLock mu(event_list_lock_);
+    JdwpEvent** match_list = AllocMatchList(event_list_size_);
 
     if (start) {
-      FindMatchingEvents(EK_THREAD_START, &basket, matchList, &match_count);
+      FindMatchingEvents(EK_THREAD_START, &basket, match_list, &match_count);
     } else {
-      FindMatchingEvents(EK_THREAD_DEATH, &basket, matchList, &match_count);
+      FindMatchingEvents(EK_THREAD_DEATH, &basket, match_list, &match_count);
     }
 
     if (match_count != 0) {
-      VLOG(jdwp) << "EVENT: " << matchList[0]->eventKind << "(" << match_count << " total) "
+      VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
                  << StringPrintf("thread=%#llx", basket.threadId) << ")";
 
-      suspendPolicy = scanSuspendPolicy(matchList, match_count);
-      VLOG(jdwp) << "  suspendPolicy=" << suspendPolicy;
+      suspend_policy = scanSuspendPolicy(match_list, match_count);
+      VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
 
       pReq = eventPrep();
-      expandBufAdd1(pReq, suspendPolicy);
+      expandBufAdd1(pReq, suspend_policy);
       expandBufAdd4BE(pReq, match_count);
 
       for (int i = 0; i < match_count; i++) {
-        expandBufAdd1(pReq, matchList[i]->eventKind);
-        expandBufAdd4BE(pReq, matchList[i]->requestId);
+        expandBufAdd1(pReq, match_list[i]->eventKind);
+        expandBufAdd4BE(pReq, match_list[i]->requestId);
         expandBufAdd8BE(pReq, basket.threadId);
       }
     }
 
-    CleanupMatchList(matchList, match_count);
+    CleanupMatchList(match_list, match_count);
   }
 
   /* send request and possibly suspend ourselves */
   if (pReq != NULL) {
     int old_state = Dbg::ThreadWaiting();
-    if (suspendPolicy != SP_NONE) {
+    if (suspend_policy != SP_NONE) {
       SetWaitForEventThread(basket.threadId);
     }
     EventFinish(pReq);
 
-    SuspendByPolicy(suspendPolicy);
+    SuspendByPolicy(suspend_policy);
     Dbg::ThreadContinuing(old_state);
   }
 
@@ -923,15 +924,16 @@
     return false;
   }
 
-  JdwpEvent** matchList = AllocMatchList(numEvents);
+  JdwpEvent** match_list = NULL;
   int match_count = 0;
   ExpandBuf* pReq = NULL;
-  JdwpSuspendPolicy suspendPolicy = SP_NONE;
+  JdwpSuspendPolicy suspend_policy = SP_NONE;
   {
-    MutexLock mu(event_lock_);
-    FindMatchingEvents(EK_EXCEPTION, &basket, matchList, &match_count);
+    MutexLock mu(event_list_lock_);
+    match_list = AllocMatchList(event_list_size_);
+    FindMatchingEvents(EK_EXCEPTION, &basket, match_list, &match_count);
     if (match_count != 0) {
-      VLOG(jdwp) << "EVENT: " << matchList[0]->eventKind << "(" << match_count << " total)"
+      VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total)"
                  << StringPrintf(" thread=%#llx", basket.threadId)
                  << StringPrintf(" exceptId=%#llx", exceptionId)
                  << " caught=" << basket.caught << ")"
@@ -942,16 +944,16 @@
         VLOG(jdwp) << "  catch: " << *pCatchLoc;
       }
 
-      suspendPolicy = scanSuspendPolicy(matchList, match_count);
-      VLOG(jdwp) << "  suspendPolicy=" << suspendPolicy;
+      suspend_policy = scanSuspendPolicy(match_list, match_count);
+      VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
 
       pReq = eventPrep();
-      expandBufAdd1(pReq, suspendPolicy);
+      expandBufAdd1(pReq, suspend_policy);
       expandBufAdd4BE(pReq, match_count);
 
       for (int i = 0; i < match_count; i++) {
-        expandBufAdd1(pReq, matchList[i]->eventKind);
-        expandBufAdd4BE(pReq, matchList[i]->requestId);
+        expandBufAdd1(pReq, match_list[i]->eventKind);
+        expandBufAdd4BE(pReq, match_list[i]->requestId);
         expandBufAdd8BE(pReq, basket.threadId);
 
         AddLocation(pReq, pThrowLoc);
@@ -964,19 +966,19 @@
       Dbg::RegisterObjectId(exceptionId);
     }
 
-    CleanupMatchList(matchList, match_count);
+    CleanupMatchList(match_list, match_count);
   }
 
   /* send request and possibly suspend ourselves */
   if (pReq != NULL) {
     int old_state = Dbg::ThreadWaiting();
-    if (suspendPolicy != SP_NONE) {
+    if (suspend_policy != SP_NONE) {
       SetWaitForEventThread(basket.threadId);
     }
 
     EventFinish(pReq);
 
-    SuspendByPolicy(suspendPolicy);
+    SuspendByPolicy(suspend_policy);
     Dbg::ThreadContinuing(old_state);
   }
 
@@ -1003,19 +1005,19 @@
     return false;
   }
 
-  JdwpEvent** matchList = AllocMatchList(numEvents);
-  int match_count = 0;
   ExpandBuf* pReq = NULL;
-  JdwpSuspendPolicy suspendPolicy = SP_NONE;
+  JdwpSuspendPolicy suspend_policy = SP_NONE;
+  int match_count = 0;
   {
-    MutexLock mu(event_lock_);
-    FindMatchingEvents(EK_CLASS_PREPARE, &basket, matchList, &match_count);
+    MutexLock mu(event_list_lock_);
+    JdwpEvent** match_list = AllocMatchList(event_list_size_);
+    FindMatchingEvents(EK_CLASS_PREPARE, &basket, match_list, &match_count);
     if (match_count != 0) {
-      VLOG(jdwp) << "EVENT: " << matchList[0]->eventKind << "(" << match_count << " total) "
+      VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
                  << StringPrintf("thread=%#llx", basket.threadId) << ") " << signature;
 
-      suspendPolicy = scanSuspendPolicy(matchList, match_count);
-      VLOG(jdwp) << "  suspendPolicy=" << suspendPolicy;
+      suspend_policy = scanSuspendPolicy(match_list, match_count);
+      VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
 
       if (basket.threadId == debugThreadId) {
         /*
@@ -1025,18 +1027,18 @@
          */
         VLOG(jdwp) << "  NOTE: class prepare in debugger thread!";
         basket.threadId = 0;
-        if (suspendPolicy == SP_EVENT_THREAD) {
-          suspendPolicy = SP_ALL;
+        if (suspend_policy == SP_EVENT_THREAD) {
+          suspend_policy = SP_ALL;
         }
       }
 
       pReq = eventPrep();
-      expandBufAdd1(pReq, suspendPolicy);
+      expandBufAdd1(pReq, suspend_policy);
       expandBufAdd4BE(pReq, match_count);
 
       for (int i = 0; i < match_count; i++) {
-        expandBufAdd1(pReq, matchList[i]->eventKind);
-        expandBufAdd4BE(pReq, matchList[i]->requestId);
+        expandBufAdd1(pReq, match_list[i]->eventKind);
+        expandBufAdd4BE(pReq, match_list[i]->requestId);
         expandBufAdd8BE(pReq, basket.threadId);
 
         expandBufAdd1(pReq, tag);
@@ -1045,18 +1047,18 @@
         expandBufAdd4BE(pReq, status);
       }
     }
-    CleanupMatchList(matchList, match_count);
+    CleanupMatchList(match_list, match_count);
   }
 
   /* send request and possibly suspend ourselves */
   if (pReq != NULL) {
     int old_state = Dbg::ThreadWaiting();
-    if (suspendPolicy != SP_NONE) {
+    if (suspend_policy != SP_NONE) {
       SetWaitForEventThread(basket.threadId);
     }
     EventFinish(pReq);
 
-    SuspendByPolicy(suspendPolicy);
+    SuspendByPolicy(suspend_policy);
     Dbg::ThreadContinuing(old_state);
   }
 
diff --git a/src/jdwp/jdwp_event.h b/src/jdwp/jdwp_event.h
index e8f633f..b28aac9 100644
--- a/src/jdwp/jdwp_event.h
+++ b/src/jdwp/jdwp_event.h
@@ -92,7 +92,7 @@
   JdwpEvent* next;
 
   JdwpEventKind eventKind;      /* what kind of event is this? */
-  JdwpSuspendPolicy suspendPolicy;  /* suspend all, none, or self? */
+  JdwpSuspendPolicy suspend_policy;  /* suspend all, none, or self? */
   int modCount;       /* #of entries in mods[] */
   uint32_t requestId;      /* serial#, reported to debugger */
 
diff --git a/src/jdwp/jdwp_handler.cc b/src/jdwp/jdwp_handler.cc
index cf1e188..c49e9da 100644
--- a/src/jdwp/jdwp_handler.cc
+++ b/src/jdwp/jdwp_handler.cc
@@ -1157,18 +1157,18 @@
   const uint8_t* origBuf = buf;
 
   uint8_t eventKind = Read1(&buf);
-  uint8_t suspendPolicy = Read1(&buf);
+  uint8_t suspend_policy = Read1(&buf);
   uint32_t modifierCount = Read4BE(&buf);
 
   VLOG(jdwp) << "  Set(kind=" << JdwpEventKind(eventKind)
-               << " suspend=" << JdwpSuspendPolicy(suspendPolicy)
+               << " suspend=" << JdwpSuspendPolicy(suspend_policy)
                << " mods=" << modifierCount << ")";
 
   CHECK_LT(modifierCount, 256U);    /* reasonableness check */
 
   JdwpEvent* pEvent = EventAlloc(modifierCount);
   pEvent->eventKind = static_cast<JdwpEventKind>(eventKind);
-  pEvent->suspendPolicy = static_cast<JdwpSuspendPolicy>(suspendPolicy);
+  pEvent->suspend_policy = static_cast<JdwpSuspendPolicy>(suspend_policy);
   pEvent->modCount = modifierCount;
 
   /*
diff --git a/src/jdwp/jdwp_main.cc b/src/jdwp/jdwp_main.cc
index df24b8c..fb24e29 100644
--- a/src/jdwp/jdwp_main.cc
+++ b/src/jdwp/jdwp_main.cc
@@ -72,7 +72,7 @@
  */
 uint32_t JdwpState::NextRequestSerial() {
   MutexLock mu(serial_lock_);
-  return requestSerial++;
+  return request_serial_++;
 }
 
 /*
@@ -81,7 +81,7 @@
  */
 uint32_t JdwpState::NextEventSerial() {
   MutexLock mu(serial_lock_);
-  return eventSerial++;
+  return event_serial_++;
 }
 
 JdwpState::JdwpState(const JdwpOptions* options)
@@ -96,12 +96,12 @@
       attach_lock_("JDWP attach lock"),
       attach_cond_("JDWP attach condition variable"),
       lastActivityWhen(0),
-      requestSerial(0x10000000),
-      eventSerial(0x20000000),
       serial_lock_("JDWP serial lock"),
-      numEvents(0),
-      eventList(NULL),
-      event_lock_("JDWP event lock"),
+      request_serial_(0x10000000),
+      event_serial_(0x20000000),
+      event_list_lock_("JDWP event list lock"),
+      event_list_(NULL),
+      event_list_size_(0),
       event_thread_lock_("JDWP event thread lock"),
       event_thread_cond_("JDWP event thread condition variable"),
       eventThreadId(0),
@@ -140,7 +140,8 @@
    * won't signal the cond var before we're waiting.
    */
   state->thread_start_lock_.Lock();
-  if (options->suspend) {
+  const bool should_suspend = options->suspend;
+  if (should_suspend) {
     state->attach_lock_.Lock();
   }
 
@@ -165,7 +166,7 @@
    * times out (for timeout=xxx), so we have to check to see what happened
    * when we wake up.
    */
-  if (options->suspend) {
+  if (should_suspend) {
     {
       ScopedThreadStateChange tsc(Thread::Current(), kVmWait);
 
@@ -201,7 +202,10 @@
   /* could reset the serial numbers, but no need to */
 
   UnregisterAll();
-  CHECK(eventList == NULL);
+  {
+    MutexLock mu(event_list_lock_);
+    CHECK(event_list_ == NULL);
+  }
 
   /*
    * Should not have one of these in progress.  If the debugger went away
diff --git a/src/jdwp/jdwp_priv.h b/src/jdwp/jdwp_priv.h
index f1e97b7..7a6a528 100644
--- a/src/jdwp/jdwp_priv.h
+++ b/src/jdwp/jdwp_priv.h
@@ -70,14 +70,15 @@
  * Base class for JdwpNetState
  */
 class JdwpNetStateBase {
-public:
+ public:
   int clientSock;     /* active connection to debugger */
 
   JdwpNetStateBase();
   ssize_t writePacket(ExpandBuf* pReply);
   ssize_t writeBufferedPacket(const iovec* iov, int iov_count);
 
-private:
+ private:
+  // Used to serialize writes to the socket.
   Mutex socket_lock_;
 };
 
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index 4f01f7d..82fba54 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -621,6 +621,8 @@
    * If the call has not yet finished in another thread, wait for it.
    */
   bool CheckOnLoadResult() {
+    MutexLock mu(jni_on_load_lock_);
+
     Thread* self = Thread::Current();
     if (jni_on_load_thread_id_ == self->GetThinLockId()) {
       // Check this so we don't end up waiting for ourselves.  We need
@@ -630,7 +632,6 @@
       return true;
     }
 
-    MutexLock mu(jni_on_load_lock_);
     while (jni_on_load_result_ == kPending) {
       VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" "
                 << "JNI_OnLoad...]";
@@ -645,11 +646,12 @@
   }
 
   void SetResult(bool result) {
+    MutexLock mu(jni_on_load_lock_);
+
     jni_on_load_result_ = result ? kOkay : kFailed;
     jni_on_load_thread_id_ = 0;
 
     // Broadcast a wakeup to anybody sleeping on the condition variable.
-    MutexLock mu(jni_on_load_lock_);
     jni_on_load_cond_.Broadcast();
   }
 
@@ -678,9 +680,9 @@
   // Wait for JNI_OnLoad in other thread.
   ConditionVariable jni_on_load_cond_;
   // Recursive invocation guard.
-  uint32_t jni_on_load_thread_id_;
+  uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
   // Result of earlier JNI_OnLoad call.
-  JNI_OnLoadState jni_on_load_result_;
+  JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
 };
 
 // This exists mainly to keep implementation details out of the header file.
diff --git a/src/jni_internal.h b/src/jni_internal.h
index 389c96b..e62d62a 100644
--- a/src/jni_internal.h
+++ b/src/jni_internal.h
@@ -133,18 +133,18 @@
 
   // Used to hold references to pinned primitive arrays.
   Mutex pins_lock;
-  ReferenceTable pin_table;
+  ReferenceTable pin_table GUARDED_BY(pins_lock);
 
   // JNI global references.
   Mutex globals_lock;
-  IndirectReferenceTable globals;
+  IndirectReferenceTable globals GUARDED_BY(globals_lock);
 
   // JNI weak global references.
   Mutex weak_globals_lock;
-  IndirectReferenceTable weak_globals;
+  IndirectReferenceTable weak_globals GUARDED_BY(weak_globals_lock);
 
   Mutex libraries_lock;
-  Libraries* libraries;
+  Libraries* libraries GUARDED_BY(libraries_lock);
 
   // Used by -Xcheck:jni.
   const JNIInvokeInterface* unchecked_functions;
diff --git a/src/macros.h b/src/macros.h
index 8fb4599..28b9f7a 100644
--- a/src/macros.h
+++ b/src/macros.h
@@ -140,4 +140,48 @@
   _rc; })
 #endif
 
+#if defined(__SUPPORT_TS_ANNOTATION__)
+
+#define ACQUIRED_AFTER(...) __attribute__ ((acquired_after(__VA_ARGS__)))
+#define ACQUIRED_BEFORE(...) __attribute__ ((acquired_before(__VA_ARGS__)))
+#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__ ((exclusive_lock(__VA_ARGS__)))
+#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__ ((exclusive_locks_required(__VA_ARGS__)))
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__ ((exclusive_trylock(__VA_ARGS__)))
+#define GUARDED_BY(x) __attribute__ ((guarded_by(x)))
+#define GUARDED_VAR __attribute__ ((guarded))
+#define LOCKABLE __attribute__ ((lockable))
+#define LOCK_RETURNED(x) __attribute__ ((lock_returned(x)))
+#define LOCKS_EXCLUDED(...) __attribute__ ((locks_excluded(__VA_ARGS__)))
+#define NO_THREAD_SAFETY_ANALYSIS __attribute__ ((no_thread_safety_analysis))
+#define PT_GUARDED_BY(x) __attribute__ ((point_to_guarded_by(x)))
+#define PT_GUARDED_VAR __attribute__ ((point_to_guarded))
+#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
+#define SHARED_LOCK_FUNCTION(...) __attribute__ ((shared_lock(__VA_ARGS__)))
+#define SHARED_LOCKS_REQUIRED(...) __attribute__ ((shared_locks_required(__VA_ARGS__)))
+#define SHARED_TRYLOCK_FUNCTION(...) __attribute__ ((shared_trylock(__VA_ARGS__)))
+#define UNLOCK_FUNCTION(...) __attribute__ ((unlock(__VA_ARGS__)))
+
+#else
+
+#define ACQUIRED_AFTER(...)
+#define ACQUIRED_BEFORE(...)
+#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define EXCLUSIVE_LOCKS_REQUIRED(...)
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#define GUARDED_BY(x)
+#define GUARDED_VAR
+#define LOCKABLE
+#define LOCK_RETURNED(x)
+#define LOCKS_EXCLUDED(...)
+#define NO_THREAD_SAFETY_ANALYSIS
+#define PT_GUARDED_BY(x)
+#define PT_GUARDED_VAR
+#define SCOPED_LOCKABLE
+#define SHARED_LOCK_FUNCTION(...)
+#define SHARED_LOCKS_REQUIRED(...)
+#define SHARED_TRYLOCK_FUNCTION(...)
+#define UNLOCK_FUNCTION(...)
+
+#endif // defined(__SUPPORT_TS_ANNOTATION__)
+
 #endif  // ART_SRC_MACROS_H_
diff --git a/src/monitor.h b/src/monitor.h
index b949921..e5aa01e 100644
--- a/src/monitor.h
+++ b/src/monitor.h
@@ -91,13 +91,13 @@
 
   static void FailedUnlock(Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon);
 
-  void Lock(Thread* self);
-  bool Unlock(Thread* thread);
+  void Lock(Thread* self) NO_THREAD_SAFETY_ANALYSIS; // TODO: mark Object LOCKABLE.
+  bool Unlock(Thread* thread) NO_THREAD_SAFETY_ANALYSIS; // TODO: mark Object LOCKABLE.
 
   void Notify(Thread* self);
   void NotifyAll(Thread* self);
 
-  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow);
+  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow) NO_THREAD_SAFETY_ANALYSIS; // TODO: mark Object LOCKABLE.
 
   // Translates the provided method and pc into its declaring class' source file and line number.
   void TranslateLocation(const Method* method, uint32_t pc,
diff --git a/src/mutex.cc b/src/mutex.cc
index f5c4435..8fea616 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -29,6 +29,10 @@
 
 #define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
 
+extern int pthread_mutex_lock(pthread_mutex_t* mutex) EXCLUSIVE_LOCK_FUNCTION(mutex);
+extern int pthread_mutex_unlock(pthread_mutex_t* mutex) UNLOCK_FUNCTION(1);
+extern int pthread_mutex_trylock(pthread_mutex_t* mutex) EXCLUSIVE_TRYLOCK_FUNCTION(0, mutex);
+
 namespace art {
 
 // This works on Mac OS 10.7, but hasn't been tested on older releases.
diff --git a/src/mutex.h b/src/mutex.h
index e89e2ec..0e7c173 100644
--- a/src/mutex.h
+++ b/src/mutex.h
@@ -38,16 +38,16 @@
 };
 std::ostream& operator<<(std::ostream& os, const MutexRank& rhs);
 
-class Mutex {
+class LOCKABLE Mutex {
  public:
   explicit Mutex(const char* name, MutexRank rank = kNoMutexRank);
   ~Mutex();
 
-  void Lock();
+  void Lock() EXCLUSIVE_LOCK_FUNCTION();
 
-  bool TryLock();
+  bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);
 
-  void Unlock();
+  void Unlock() UNLOCK_FUNCTION();
 
 #if !defined(NDEBUG)
   void AssertHeld();
@@ -71,13 +71,13 @@
   DISALLOW_COPY_AND_ASSIGN(Mutex);
 };
 
-class MutexLock {
+class SCOPED_LOCKABLE MutexLock {
  public:
-  explicit MutexLock(Mutex& mu) : mu_(mu) {
+  explicit MutexLock(Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
     mu_.Lock();
   }
 
-  ~MutexLock() {
+  ~MutexLock() UNLOCK_FUNCTION() {
     mu_.Unlock();
   }
 
@@ -85,6 +85,8 @@
   Mutex& mu_;
   DISALLOW_COPY_AND_ASSIGN(MutexLock);
 };
+// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
+#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
 
 class ConditionVariable {
  public:
diff --git a/src/mutex_test.cc b/src/mutex_test.cc
index 7c7189b..fb14b9f 100644
--- a/src/mutex_test.cc
+++ b/src/mutex_test.cc
@@ -45,18 +45,37 @@
 TEST(Mutex, TryLockUnlock) {
   Mutex mu("test mutex");
   MutexTester::AssertDepth(mu, 0U);
-  mu.TryLock();
+  ASSERT_TRUE(mu.TryLock());
+  MutexTester::AssertDepth(mu, 1U);
+  mu.Unlock();
+  MutexTester::AssertDepth(mu, 0U);
+}
+
+// GCC doesn't get recursive mutexes, so we have to turn off thread safety analysis.
+static void RecursiveLockUnlockTest() NO_THREAD_SAFETY_ANALYSIS {
+  Mutex mu("test mutex");
+  MutexTester::AssertDepth(mu, 0U);
+  mu.Lock();
+  MutexTester::AssertDepth(mu, 1U);
+  mu.Lock();
+  MutexTester::AssertDepth(mu, 2U);
+  mu.Unlock();
   MutexTester::AssertDepth(mu, 1U);
   mu.Unlock();
   MutexTester::AssertDepth(mu, 0U);
 }
 
 TEST(Mutex, RecursiveLockUnlock) {
+  RecursiveLockUnlockTest();
+}
+
+// GCC doesn't get recursive mutexes, so we have to turn off thread safety analysis.
+static void RecursiveTryLockUnlockTest() NO_THREAD_SAFETY_ANALYSIS {
   Mutex mu("test mutex");
   MutexTester::AssertDepth(mu, 0U);
-  mu.Lock();
+  ASSERT_TRUE(mu.TryLock());
   MutexTester::AssertDepth(mu, 1U);
-  mu.Lock();
+  ASSERT_TRUE(mu.TryLock());
   MutexTester::AssertDepth(mu, 2U);
   mu.Unlock();
   MutexTester::AssertDepth(mu, 1U);
@@ -65,16 +84,7 @@
 }
 
 TEST(Mutex, RecursiveTryLockUnlock) {
-  Mutex mu("test mutex");
-  MutexTester::AssertDepth(mu, 0U);
-  mu.TryLock();
-  MutexTester::AssertDepth(mu, 1U);
-  mu.TryLock();
-  MutexTester::AssertDepth(mu, 2U);
-  mu.Unlock();
-  MutexTester::AssertDepth(mu, 1U);
-  mu.Unlock();
-  MutexTester::AssertDepth(mu, 0U);
+  RecursiveTryLockUnlockTest();
 }
 
 }  // namespace art
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index 5c7a30f..d3c799c 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -155,8 +155,8 @@
   Runtime::Current()->GetHeap()->CollectGarbage(false);
 }
 
-int SignalCatcher::WaitForSignal(SignalSet& signals) {
-  ScopedThreadStateChange tsc(thread_, kVmWait);
+int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) {
+  ScopedThreadStateChange tsc(self, kVmWait);
 
   // Signals for sigwait() must be blocked but not ignored.  We
   // block signals like SIGQUIT for all threads, so the condition
@@ -166,7 +166,7 @@
   if (!ShouldHalt()) {
     // Let the user know we got the signal, just in case the system's too screwed for us to
     // actually do what they want us to do...
-    LOG(INFO) << *thread_ << ": reacting to signal " << signal_number;
+    LOG(INFO) << *self << ": reacting to signal " << signal_number;
 
     // If anyone's holding locks (which might prevent us from getting back into state Runnable), say so...
     Runtime::Current()->DumpLockHolders(LOG(INFO));
@@ -181,11 +181,13 @@
 
   Runtime* runtime = Runtime::Current();
   runtime->AttachCurrentThread("Signal Catcher", true, Thread::GetSystemThreadGroup());
-  Thread::Current()->SetState(kRunnable);
+
+  Thread* self = Thread::Current();
+  self->SetState(kRunnable);
 
   {
     MutexLock mu(signal_catcher->lock_);
-    signal_catcher->thread_ = Thread::Current();
+    signal_catcher->thread_ = self;
     signal_catcher->cond_.Broadcast();
   }
 
@@ -195,7 +197,7 @@
   signals.Add(SIGUSR1);
 
   while (true) {
-    int signal_number = signal_catcher->WaitForSignal(signals);
+    int signal_number = signal_catcher->WaitForSignal(self, signals);
     if (signal_catcher->ShouldHalt()) {
       runtime->DetachCurrentThread();
       return NULL;
diff --git a/src/signal_catcher.h b/src/signal_catcher.h
index 131b07c..35e035f 100644
--- a/src/signal_catcher.h
+++ b/src/signal_catcher.h
@@ -44,14 +44,15 @@
   void Output(const std::string& s);
   void SetHaltFlag(bool new_value);
   bool ShouldHalt();
-  int WaitForSignal(SignalSet& signals);
+  int WaitForSignal(Thread* self, SignalSet& signals);
 
   std::string stack_trace_file_;
+
   mutable Mutex lock_;
-  bool halt_;
   ConditionVariable cond_;
-  pthread_t pthread_;
-  Thread* thread_;
+  bool halt_ GUARDED_BY(lock_);
+  pthread_t pthread_ GUARDED_BY(lock_);
+  Thread* thread_ GUARDED_BY(lock_);
 };
 
 }  // namespace art
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 0ec9bdd..186ddbc 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -395,7 +395,7 @@
 }
 
 void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
-  thread_list_lock_.AssertHeld();
+  ScopedThreadListLock thread_list_lock;
   for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
     callback(*it, context);
   }
@@ -446,10 +446,10 @@
 
 void ThreadList::WaitForGo() {
   Thread* self = Thread::Current();
-  DCHECK(Contains(self));
 
   {
     ScopedThreadListLock thread_list_lock;
+    DCHECK(Contains(self));
 
     // Tell our parent that we're in the thread list.
     VLOG(threads) << *self << " telling parent that we're now in thread list...";
diff --git a/src/thread_list.h b/src/thread_list.h
index dc6e820..36cd094 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -47,7 +47,7 @@
   void Suspend(Thread* thread, bool for_debugger = false);
   void UndoDebuggerSuspensions();
 
-  // Iterates over all the threads. The caller must hold the thread list lock.
+  // Iterates over all the threads.
   void ForEach(void (*callback)(Thread*, void*), void* context);
 
   void Register();
@@ -77,10 +77,10 @@
   static void ModifySuspendCount(Thread* thread, int delta, bool for_debugger);
 
   mutable Mutex allocated_ids_lock_;
-  std::bitset<kMaxThreadId> allocated_ids_;
+  std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(allocated_ids_lock_);
 
   mutable Mutex thread_list_lock_;
-  std::list<Thread*> list_;
+  std::list<Thread*> list_; // TODO: GUARDED_BY(thread_list_lock_);
 
   ConditionVariable thread_start_cond_;
   ConditionVariable thread_exit_cond_;
@@ -88,7 +88,7 @@
   // This lock guards every thread's suspend_count_ field...
   mutable Mutex thread_suspend_count_lock_;
   // ...and is used in conjunction with this condition variable.
-  ConditionVariable thread_suspend_count_cond_;
+  ConditionVariable thread_suspend_count_cond_ GUARDED_BY(thread_suspend_count_lock_);
 
   friend class Thread;
   friend class ScopedThreadListLock;
diff --git a/src/trace.cc b/src/trace.cc
index aa6c523..a453e37 100644
--- a/src/trace.cc
+++ b/src/trace.cc
@@ -485,7 +485,6 @@
 }
 
 void Trace::DumpThreadList(std::ostream& os) {
-  ScopedThreadListLock thread_list_lock;
   Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
 }
 
@@ -495,12 +494,7 @@
 
 void Trace::UninstallStubs() {
   Runtime::Current()->GetClassLinker()->VisitClasses(UninstallStubsClassVisitor, NULL);
-
-  // Restore stacks of all threads
-  {
-    ScopedThreadListLock thread_list_lock;
-    Runtime::Current()->GetThreadList()->ForEach(TraceRestoreStack, NULL);
-  }
+  Runtime::Current()->GetThreadList()->ForEach(TraceRestoreStack, NULL);
 }
 
 uint32_t TraceMethodUnwindFromCode(Thread* self) {