/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_H_
#define ART_RUNTIME_BASE_MUTEX_H_

#include <pthread.h>
#include <stdint.h>

#include <iosfwd>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"

#if defined(__APPLE__)
#define ART_USE_FUTEXES 0
#else
#define ART_USE_FUTEXES 1
#endif

// Currently Darwin doesn't support locks with timeouts.
#if !defined(__APPLE__)
#define HAVE_TIMED_RWLOCK 1
#else
#define HAVE_TIMED_RWLOCK 0
#endif

namespace art {

class LOCKABLE ReaderWriterMutex;
class ScopedContentionRecorder;
class Thread;

// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle-free
// partial ordering and thereby causes would-be deadlocks to fail debug checks.
//
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
  kLoggingLock = 0,
  kMemMapsLock,
  kSwapMutexesLock,
  kUnexpectedSignalLock,
  kThreadSuspendCountLock,
  kAbortLock,
  kJdwpSocketLock,
  kRegionSpaceRegionLock,
  kReferenceQueueSoftReferencesLock,
  kReferenceQueuePhantomReferencesLock,
  kReferenceQueueFinalizerReferencesLock,
  kReferenceQueueWeakReferencesLock,
  kReferenceQueueClearedReferencesLock,
  kReferenceProcessorLock,
  kRosAllocGlobalLock,
  kRosAllocBracketLock,
  kRosAllocBulkFreeLock,
  kAllocSpaceLock,
  kBumpPointerSpaceBlockLock,
  kDexFileMethodInlinerLock,
  kDexFileToMethodInlinerMapLock,
  kMarkSweepMarkStackLock,
  kTransactionLogLock,
  kInternTableLock,
  kOatFileSecondaryLookupLock,
  kDefaultMutexLevel,
  kMarkSweepLargeObjectLock,
  kPinTableLock,
  kJdwpObjectRegistryLock,
  kModifyLdtLock,
  kAllocatedThreadIdsLock,
  kMonitorPoolLock,
  kClassLinkerClassesLock,
  kBreakpointLock,
  kMonitorLock,
  kMonitorListLock,
  kJniLoadLibraryLock,
  kThreadListLock,
  kBreakpointInvokeLock,
  kAllocTrackerLock,
  kDeoptimizationLock,
  kProfilerLock,
  kJdwpEventListLock,
  kJdwpAttachLock,
  kJdwpStartLock,
  kRuntimeShutdownLock,
  kTraceLock,
  kHeapBitmapLock,
  kMutatorLock,
  kInstrumentEntrypointsLock,
  kZygoteCreationLock,

  kLockLevelCount  // Must come last.
};
std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
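
// For example (an illustrative sketch; the mutex names below are hypothetical), using the Mutex
// class declared further down: a thread holding a mutex may only acquire mutexes at strictly
// lower levels, so acquisitions must proceed from higher levels towards lower ones, and debug
// builds fail a check on violations:
//
//   Mutex trace_mutex("trace mutex", kTraceLock);
//   Mutex logging_mutex("logging mutex", kLoggingLock);
//   Thread* self = Thread::Current();
//   trace_mutex.ExclusiveLock(self);     // OK: nothing at a lower or equal level is held.
//   logging_mutex.ExclusiveLock(self);   // OK: kLoggingLock is below kTraceLock.
//   logging_mutex.ExclusiveUnlock(self);
//   trace_mutex.ExclusiveUnlock(self);
//   // Acquiring trace_mutex while holding logging_mutex would fail the level check.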

const bool kDebugLocking = kIsDebugBuild;

// Record and log contention information, dumpable via SIGQUIT.
#if ART_USE_FUTEXES
// To enable lock contention logging, set this to true.
const bool kLogLockContentions = false;
#else
// Keep this false since lock contention logging is only supported with futexes.
const bool kLogLockContentions = false;
#endif
const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;

// Base class for all Mutex implementations
class BaseMutex {
 public:
  const char* GetName() const {
    return name_;
  }

  virtual bool IsMutex() const { return false; }
  virtual bool IsReaderWriterMutex() const { return false; }

  virtual void Dump(std::ostream& os) const = 0;

  static void DumpAll(std::ostream& os);

 protected:
  friend class ConditionVariable;

  BaseMutex(const char* name, LockLevel level);
  virtual ~BaseMutex();
  void RegisterAsLocked(Thread* self);
  void RegisterAsUnlocked(Thread* self);
  void CheckSafeToWait(Thread* self);

  friend class ScopedContentionRecorder;

  void RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint64_t nano_time_blocked);
  void DumpContention(std::ostream& os) const;

  const LockLevel level_;  // Support for lock hierarchy.
  const char* const name_;

  // A log entry that records contention but makes no guarantee that either tid will be held live.
  struct ContentionLogEntry {
    ContentionLogEntry() : blocked_tid(0), owner_tid(0) {}
    uint64_t blocked_tid;
    uint64_t owner_tid;
    AtomicInteger count;
  };
  struct ContentionLogData {
    ContentionLogEntry contention_log[kContentionLogSize];
    // The next entry in the contention log to be updated. Value ranges from 0 to
    // kContentionLogSize - 1.
    AtomicInteger cur_content_log_entry;
    // Number of times the Mutex has been contended.
    AtomicInteger contention_count;
    // Sum of time waited by all contenders in ns.
    Atomic<uint64_t> wait_time;
    void AddToWaitTime(uint64_t value);
    ContentionLogData() : wait_time(0) {}
  };
  ContentionLogData contention_log_data_[kContentionLogDataSize];

 public:
  bool HasEverContended() const {
    if (kLogLockContentions) {
      return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
    }
    return false;
  }
};
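
// For example (illustrative), contention statistics for every mutex can be dumped to a stream;
// this is how the contention information mentioned above becomes dumpable via SIGQUIT:
//
//   std::ostringstream oss;
//   BaseMutex::DumpAll(oss);
//   LOG(INFO) << oss.str();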

// A Mutex is used to achieve mutual exclusion between threads. A Mutex can be used to gain
// exclusive access to what it guards. A Mutex can be in one of two states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread.
//
// The effect of locking and unlocking operations on the state is:
// State     | ExclusiveLock | ExclusiveUnlock
// -------------------------------------------
// Free      | Exclusive     | error
// Exclusive | Block*        | Free
// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
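//
// A minimal usage sketch (illustrative; the mutex name and the surrounding code are hypothetical).
// Prefer the scoped MutexLock helper declared further down.
//
//   Mutex cache_lock("cache lock");     // Created at kDefaultMutexLevel.
//   Thread* self = Thread::Current();
//   cache_lock.ExclusiveLock(self);     // Blocks until the mutex is Free.
//   // ... read or write the state guarded by cache_lock ...
//   cache_lock.ExclusiveUnlock(self);   // Returns the mutex to the Free state.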
std::ostream& operator<<(std::ostream& os, const Mutex& mu);
class LOCKABLE Mutex : public BaseMutex {
 public:
  explicit Mutex(const char* name, LockLevel level = kDefaultMutexLevel, bool recursive = false);
  ~Mutex();

  virtual bool IsMutex() const { return true; }

  // Block until mutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Returns true if exclusive access is acquired, false otherwise.
  bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);
  bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Is the current thread the exclusive holder of the Mutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert that the Mutex is exclusively held by the current thread.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert that the Mutex is not held by the current thread.
  void AssertNotHeldExclusive(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  // Returns how many times this Mutex has been locked; it is usually better to use
  // AssertHeld/AssertNotHeld.
  unsigned int GetDepth() const {
    return recursion_count_;
  }

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // 0 is unheld, 1 is held.
  AtomicInteger state_;
  // Exclusive owner.
  volatile uint64_t exclusive_owner_;
  // Number of waiting contenders.
  AtomicInteger num_contenders_;
#else
  pthread_mutex_t mutex_;
  volatile uint64_t exclusive_owner_;  // Guarded by mutex_.
#endif
  const bool recursive_;  // Can the lock be recursively held?
  unsigned int recursion_count_;
  friend class ConditionVariable;
  DISALLOW_COPY_AND_ASSIGN(Mutex);
};

// A ReaderWriterMutex is used to achieve mutual exclusion between threads, similar to a Mutex.
// Unlike a Mutex, a ReaderWriterMutex can be used to gain exclusive (writer) or shared (reader)
// access to what it guards. A limitation compared to a Mutex is that it cannot be used with a
// condition variable. A ReaderWriterMutex can be in one of three states:
// - Free - not owned by any thread,
// - Exclusive - owned by a single thread,
// - Shared(n) - shared amongst n threads.
//
// The effect of locking and unlocking operations on the state is:
//
// State     | ExclusiveLock | ExclusiveUnlock | SharedLock       | SharedUnlock
// ----------------------------------------------------------------------------
// Free      | Exclusive     | error           | Shared(1)        | error
// Exclusive | Block         | Free            | Block            | error
// Shared(n) | Block         | error           | Shared(n+1)*     | Shared(n-1) or Free
// * For large values of n the SharedLock may block.
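//
// A minimal usage sketch (illustrative; the mutex name and surrounding code are hypothetical).
// Prefer the scoped ReaderMutexLock/WriterMutexLock helpers declared further down.
//
//   ReaderWriterMutex map_lock("map lock");
//   Thread* self = Thread::Current();
//   map_lock.SharedLock(self);       // Many threads may hold a share concurrently.
//   // ... read the state guarded by map_lock ...
//   map_lock.SharedUnlock(self);
//   map_lock.ExclusiveLock(self);    // Blocks until all shares have been released.
//   // ... write the state guarded by map_lock ...
//   map_lock.ExclusiveUnlock(self);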
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu);
class LOCKABLE ReaderWriterMutex : public BaseMutex {
 public:
  explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel);
  ~ReaderWriterMutex();

  virtual bool IsReaderWriterMutex() const { return true; }

  // Block until ReaderWriterMutex is free then acquire exclusive access.
  void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION();
  void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); }

  // Release exclusive access.
  void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION();
  void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); }

  // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success
  // or false if the timeout is reached.
#if HAVE_TIMED_RWLOCK
  bool ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns)
      EXCLUSIVE_TRYLOCK_FUNCTION(true);
#endif

  // Block until ReaderWriterMutex is shared or free, then acquire a share on the access.
  void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); }

  // Try to acquire a share of the ReaderWriterMutex.
  bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true);

  // Release a share of the access.
  void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE;
  void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); }

  // Is the current thread the exclusive holder of the ReaderWriterMutex.
  bool IsExclusiveHeld(const Thread* self) const;

  // Assert the current thread has exclusive access to the ReaderWriterMutex.
  void AssertExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); }

  // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
  void AssertNotExclusiveHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsExclusiveHeld(self)) << *this;
    }
  }
  void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); }

  // Is the current thread a shared holder of the ReaderWriterMutex.
  bool IsSharedHeld(const Thread* self) const;

  // Assert the current thread has shared access to the ReaderWriterMutex.
  void AssertSharedHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      // TODO: we can only assert this well when self != NULL.
      CHECK(IsSharedHeld(self) || self == NULL) << *this;
    }
  }
  void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }

  // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
  // mode.
  void AssertNotHeld(const Thread* self) {
    if (kDebugLocking && (gAborting == 0)) {
      CHECK(!IsSharedHeld(self)) << *this;
    }
  }

  // Id associated with exclusive owner. No memory ordering semantics if called from a thread other
  // than the owner.
  uint64_t GetExclusiveOwnerTid() const;

  virtual void Dump(std::ostream& os) const;

 private:
#if ART_USE_FUTEXES
  // Out-of-line path for handling contention for a SharedLock.
  void HandleSharedLockContention(Thread* self, int32_t cur_state);

  // -1 implies held exclusively; a positive value means shared, held by state_ many owners.
  AtomicInteger state_;
  // Exclusive owner. Modification guarded by this mutex.
  volatile uint64_t exclusive_owner_;
  // Number of contenders waiting for a reader share.
  AtomicInteger num_pending_readers_;
  // Number of contenders waiting to be the writer.
  AtomicInteger num_pending_writers_;
#else
  pthread_rwlock_t rwlock_;
  volatile uint64_t exclusive_owner_;  // Guarded by rwlock_.
#endif
  DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
};

// ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually
// (Signal) or all at once (Broadcast).
class ConditionVariable {
 public:
  explicit ConditionVariable(const char* name, Mutex& mutex);
  ~ConditionVariable();

  void Broadcast(Thread* self);
  void Signal(Thread* self);
  // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
  //       pointer copy, thereby defeating annotalysis.
  void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
  // when waiting.
  // TODO: remove this.
  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

 private:
  const char* const name_;
  // The Mutex being used by waiters. It is an error to mix condition variables between different
  // Mutexes.
  Mutex& guard_;
#if ART_USE_FUTEXES
  // A counter that is modified by signals and broadcasts. This ensures that when a waiter gives up
  // their Mutex and another thread takes it and signals, the waiting thread observes that sequence_
  // changed and doesn't enter the wait. Modified while holding guard_, but is read by futex wait
  // without guard_ held.
  AtomicInteger sequence_;
  // Number of threads that have come in to wait, not the length of the waiters on the futex as
  // waiters may have been requeued onto guard_. Guarded by guard_.
  volatile int32_t num_waiters_;
#else
  pthread_cond_t cond_;
#endif
  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
};
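
// A typical wait loop (an illustrative sketch; queue_lock, queue_cond and the IsEmpty() predicate
// are hypothetical):
//
//   Mutex queue_lock("queue lock");
//   ConditionVariable queue_cond("queue condition", queue_lock);
//   Thread* self = Thread::Current();
//   queue_lock.ExclusiveLock(self);
//   while (IsEmpty()) {
//     queue_cond.Wait(self);  // Releases queue_lock while waiting and re-acquires it on wake-up.
//   }
//   // ... consume an element while still holding queue_lock ...
//   queue_lock.ExclusiveUnlock(self);
//
// Producers call Signal(self) or Broadcast(self) while holding queue_lock, after making the state
// change the waiters are interested in.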

// Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it
// upon destruction.
class SCOPED_LOCKABLE MutexLock {
 public:
  explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~MutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  Mutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
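
// For example (illustrative), the scoped form releases the lock when the scope exits, even on
// early return:
//
//   {
//     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
//     // ... the data guarded by thread_list_lock_ may be accessed here ...
//   }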

// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE ReaderMutexLock {
 public:
  explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.SharedLock(self_);
  }

  ~ReaderMutexLock() UNLOCK_FUNCTION() {
    mu_.SharedUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(ReaderMutexLock);
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")

// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
class SCOPED_LOCKABLE WriterMutexLock {
 public:
  explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) :
      self_(self), mu_(mu) {
    mu_.ExclusiveLock(self_);
  }

  ~WriterMutexLock() UNLOCK_FUNCTION() {
    mu_.ExclusiveUnlock(self_);
  }

 private:
  Thread* const self_;
  ReaderWriterMutex& mu_;
  DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
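
// For example (illustrative), choose the scoped helper that matches the access required:
//
//   {
//     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);  // Shared access.
//     // ... read the guarded bitmap state ...
//   }
//   {
//     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);  // Exclusive access.
//     // ... modify the guarded bitmap state ...
//   }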

// Global mutexes corresponding to the levels above.
class Locks {
 public:
  static void Init();

  // Guards allocation entrypoint instrumenting.
  static Mutex* instrument_entrypoints_lock_;

  // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
  // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
  // a share on the mutator_lock_. The garbage collector may also execute with shared access but
  // at times requires exclusive access to the heap (not to be confused with the heap meta-data
  // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks
  // the mutators to suspend themselves, which also involves use of the thread_suspend_count_lock_
  // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition
  // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on
  // the mutator lock doesn't necessarily give the exclusive user (e.g. the garbage collector) a
  // chance to acquire the lock.
  //
  // Thread suspension:
  // Shared users                                  | Exclusive user
  // (holding mutator lock and in kRunnable state) |   .. running ..
  //   .. running ..                               | Request thread suspension by:
  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
  //   .. running ..                               |     all mutator threads
  //   .. running ..                               |   - releasing thread_suspend_count_lock_
  //   .. running ..                               | Block trying to acquire exclusive mutator lock
  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
  // suspend code.                                 |   .. blocked ..
  // Change state to kSuspended                    |   .. blocked ..
  // x: Release share on mutator_lock_             | Carry out exclusive access
  // Acquire thread_suspend_count_lock_            |   .. exclusive ..
  // while Thread::suspend_count_ > 0              |   .. exclusive ..
  //   - wait on Thread::resume_cond_              |   .. exclusive ..
  //     (releases thread_suspend_count_lock_)     |   .. exclusive ..
  //   .. waiting ..                               | Release mutator_lock_
  //   .. waiting ..                               | Request thread resumption by:
  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
  //   .. waiting ..                               |     all mutator threads
  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
  // Release thread_suspend_count_lock_            |  .. running ..
  // Acquire share on mutator_lock_                |  .. running ..
  //  - This could block but the thread still      |  .. running ..
  //    has a state of kSuspended and so this      |  .. running ..
  //    isn't an issue.                            |  .. running ..
  // Acquire thread_suspend_count_lock_            |  .. running ..
  //  - we poll here as we're transitioning into   |  .. running ..
  //    kRunnable and an individual thread suspend |  .. running ..
  //    request (e.g. for debugging) won't try     |  .. running ..
  //    to acquire the mutator lock (which would   |  .. running ..
  //    block as we hold the mutator lock). This   |  .. running ..
  //    poll ensures that if the suspender thought |  .. running ..
  //    we were suspended by incrementing our      |  .. running ..
  //    Thread::suspend_count_ and then reading    |  .. running ..
  //    our state we go back to waiting on         |  .. running ..
  //    Thread::resume_cond_.                      |  .. running ..
  // can_go_runnable = Thread::suspend_count_ == 0 |  .. running ..
  // Release thread_suspend_count_lock_            |  .. running ..
  // if can_go_runnable                            |  .. running ..
  //   Change state to kRunnable                   |  .. running ..
  // else                                          |  .. running ..
  //   Goto x                                      |  .. running ..
  //  .. running ..                                |  .. running ..
  static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
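
  // For example (an illustrative sketch; VisitSomething is a hypothetical function), code that
  // touches managed heap objects is typically annotated as requiring a share of the mutator lock,
  // and may assert it in debug builds:
  //
  //   void VisitSomething(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  //   Locks::mutator_lock_->AssertSharedHeld(self);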

  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);

  // Guards shutdown of the runtime.
  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);

  // Guards background profiler global state.
  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);

  // Guards trace (i.e. traceview) requests.
  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);

  // Guards debugger recent allocation records.
  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);

  // Guards updates to instrumentation to ensure mutual exclusion of
  // events like deoptimization requests.
  // TODO: improve name, perhaps instrumentation_update_lock_.
  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);

  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
  // attaching and detaching.
  static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);

  // Guards maintaining loading library data structures.
  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);

  // Guards breakpoints.
  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);

  // Guards lists of classes within the class linker.
  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);

  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
  // doesn't try to hold a higher level Mutex.
  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
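
  // For example (illustrative; log_lock_ is a hypothetical member):
  //
  //   Mutex log_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;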

  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);

  // Guards the allocation/deallocation of thread ids.
  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);

  // Guards modification of the LDT on x86.
  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);

  // Guards intern table.
  static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);

  // Guards reference processor.
  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);

  // Guards cleared references queue.
  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);

  // Guards weak references queue.
  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);

  // Guards finalizer references queue.
  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);

  // Guards phantom references queue.
  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);

  // Guards soft references queue.
  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);

  // Have an exclusive aborting thread.
  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);

  // Allow mutual exclusion when manipulating Thread::suspend_count_.
  // TODO: Does the trade-off of a per-thread lock make sense?
  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);

  // One unexpected signal at a time lock.
  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);

  // Guards the maps in mem_map.
  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);

  // Have an exclusive logging thread.
  static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_H_