Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 17 | #ifndef ART_RUNTIME_BASE_MUTEX_INL_H_ |
| 18 | #define ART_RUNTIME_BASE_MUTEX_INL_H_ |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 19 | |
Ian Rogers | 220228e | 2014-01-23 09:08:16 -0800 | [diff] [blame] | 20 | #include <inttypes.h> |
| 21 | |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 22 | #include "mutex.h" |
| 23 | |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 24 | #include "base/utils.h" |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 25 | #include "base/value_object.h" |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 26 | #include "thread.h" |
| 27 | |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 28 | #if ART_USE_FUTEXES |
| 29 | #include "linux/futex.h" |
| 30 | #include "sys/syscall.h" |
| 31 | #ifndef SYS_futex |
| 32 | #define SYS_futex __NR_futex |
| 33 | #endif |
Chih-Hung Hsieh | 729c1cf | 2014-11-06 10:49:16 -0800 | [diff] [blame] | 34 | #endif // ART_USE_FUTEXES |
| 35 | |
Ian Rogers | d6d7c3b | 2014-11-06 14:26:29 -0800 | [diff] [blame] | 36 | #define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_) |
| 37 | |
Chih-Hung Hsieh | 729c1cf | 2014-11-06 10:49:16 -0800 | [diff] [blame] | 38 | namespace art { |
| 39 | |
#if ART_USE_FUTEXES
// Thin wrapper for the futex(2) system call, which has no libc wrapper on older
// bionic/glibc. All arguments are forwarded verbatim to the kernel; returns the
// raw syscall result (-1 with errno set on failure).
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES
| 46 | |
// The following isn't strictly necessary, but we want updates on Atomic<pid_t> to be lock-free.
// (The owner-tid fields read via GetExclusiveOwnerTid() below are atomic pid_t loads.)
// TODO: Use std::atomic::is_always_lock_free after switching to C++17 atomics.
static_assert(sizeof(pid_t) <= sizeof(int32_t), "pid_t should fit in 32 bits");
| 50 | |
| 51 | static inline pid_t SafeGetTid(const Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 52 | if (self != nullptr) { |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 53 | return self->GetTid(); |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 54 | } else { |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 55 | return GetTid(); |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 56 | } |
| 57 | } |
| 58 | |
// Sanity-checks that operating on a lock of |level| with no attached Thread* is
// one of the expected cases. Thread-safety analysis is disabled because this
// runs precisely when thread bookkeeping is unavailable.
static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    CHECK(!Locks::IsSafeToCallAbortRacy() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock, during this process they
          // no longer exist and so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // When transitioning from suspended to runnable, a daemon thread might be in
          // a situation where the runtime is shutting down. To not crash our debug locking
          // mechanism we just pass null Thread* to the MutexLock during that transition
          // (see Thread::TransitionFromSuspendedToRunnable).
          level == kThreadSuspendCountLock ||
          // Avoid recursive death.
          level == kAbortLock ||
          // Locks at the absolute top of the stack can be locked at any time.
          level == kTopLockLevel) << level;
  }
}
| 88 | |
Ian Rogers | b6c31ea | 2013-02-04 18:11:33 -0800 | [diff] [blame] | 89 | inline void BaseMutex::RegisterAsLocked(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 90 | if (UNLIKELY(self == nullptr)) { |
Ian Rogers | b6c31ea | 2013-02-04 18:11:33 -0800 | [diff] [blame] | 91 | CheckUnattachedThread(level_); |
| 92 | return; |
| 93 | } |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 94 | LockLevel level = level_; |
| 95 | // It would be nice to avoid this condition checking in the non-debug case, |
| 96 | // but that would make the various methods that check if a mutex is held not |
| 97 | // work properly for thread wait locks. Since the vast majority of lock |
| 98 | // acquisitions are not thread wait locks, this check should not be too |
| 99 | // expensive. |
| 100 | if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) { |
| 101 | level = kThreadWaitWakeLock; |
| 102 | } |
Ian Rogers | b6c31ea | 2013-02-04 18:11:33 -0800 | [diff] [blame] | 103 | if (kDebugLocking) { |
| 104 | // Check if a bad Mutex of this level or lower is held. |
| 105 | bool bad_mutexes_held = false; |
Alex Light | b284f8d | 2017-11-21 00:00:48 +0000 | [diff] [blame] | 106 | // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the |
| 107 | // mutator_lock_ exclusive. This is because we suspending when holding locks at this level is |
| 108 | // not allowed and if we hold the mutator_lock_ exclusive we must unsuspend stuff eventually |
| 109 | // so there are no deadlocks. |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 110 | if (level == kTopLockLevel && |
Alex Light | b284f8d | 2017-11-21 00:00:48 +0000 | [diff] [blame] | 111 | Locks::mutator_lock_->IsSharedHeld(self) && |
| 112 | !Locks::mutator_lock_->IsExclusiveHeld(self)) { |
| 113 | LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" " |
| 114 | << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock) |
| 115 | << ") non-exclusive while locking \"" << name_ << "\" " |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 116 | << "(level " << level << " - " << static_cast<int>(level) << ") a top level" |
Alex Light | b284f8d | 2017-11-21 00:00:48 +0000 | [diff] [blame] | 117 | << "mutex. This is not allowed."; |
| 118 | bad_mutexes_held = true; |
| 119 | } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) { |
| 120 | LOG(ERROR) << "Lock level violation. Locking mutator_lock_ while already having a " |
| 121 | << "kTopLevelLock (" << self->GetHeldMutex(kTopLockLevel)->name_ << "held is " |
| 122 | << "not allowed."; |
| 123 | bad_mutexes_held = true; |
| 124 | } |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 125 | for (int i = level; i >= 0; --i) { |
Andreas Gampe | d1fbcff | 2017-04-17 21:40:28 -0700 | [diff] [blame] | 126 | LockLevel lock_level_i = static_cast<LockLevel>(i); |
| 127 | BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i); |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 128 | if (level == kTopLockLevel && |
Alex Light | b284f8d | 2017-11-21 00:00:48 +0000 | [diff] [blame] | 129 | lock_level_i == kMutatorLock && |
| 130 | Locks::mutator_lock_->IsExclusiveHeld(self)) { |
| 131 | // This is checked above. |
| 132 | continue; |
| 133 | } else if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) { |
Elliott Hughes | 0f82716 | 2013-02-26 12:12:58 -0800 | [diff] [blame] | 134 | LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" " |
Andreas Gampe | d1fbcff | 2017-04-17 21:40:28 -0700 | [diff] [blame] | 135 | << "(level " << lock_level_i << " - " << i |
Ian Rogers | 62d6c77 | 2013-02-27 08:32:07 -0800 | [diff] [blame] | 136 | << ") while locking \"" << name_ << "\" " |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 137 | << "(level " << level << " - " << static_cast<int>(level) << ")"; |
Andreas Gampe | d1fbcff | 2017-04-17 21:40:28 -0700 | [diff] [blame] | 138 | if (lock_level_i > kAbortLock) { |
Ian Rogers | b6c31ea | 2013-02-04 18:11:33 -0800 | [diff] [blame] | 139 | // Only abort in the check below if this is more than abort level lock. |
| 140 | bad_mutexes_held = true; |
| 141 | } |
| 142 | } |
| 143 | } |
Nicolas Geoffray | db97871 | 2014-12-09 13:33:38 +0000 | [diff] [blame] | 144 | if (gAborting == 0) { // Avoid recursive aborts. |
| 145 | CHECK(!bad_mutexes_held); |
| 146 | } |
Ian Rogers | b6c31ea | 2013-02-04 18:11:33 -0800 | [diff] [blame] | 147 | } |
| 148 | // Don't record monitors as they are outside the scope of analysis. They may be inspected off of |
| 149 | // the monitor list. |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 150 | if (level != kMonitorLock) { |
| 151 | self->SetHeldMutex(level, this); |
Ian Rogers | b6c31ea | 2013-02-04 18:11:33 -0800 | [diff] [blame] | 152 | } |
| 153 | } |
| 154 | |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 155 | inline void BaseMutex::RegisterAsUnlocked(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 156 | if (UNLIKELY(self == nullptr)) { |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 157 | CheckUnattachedThread(level_); |
| 158 | return; |
| 159 | } |
| 160 | if (level_ != kMonitorLock) { |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 161 | auto level = level_; |
| 162 | if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitWakeLock) == this) { |
| 163 | level = kThreadWaitWakeLock; |
Charles Munger | 1ebb52c | 2018-10-25 15:37:14 -0700 | [diff] [blame] | 164 | } |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 165 | if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts. |
| 166 | if (level == kThreadWaitWakeLock) { |
| 167 | CHECK(self->GetHeldMutex(kThreadWaitLock) != nullptr) << "Held " << kThreadWaitWakeLock << " without " << kThreadWaitLock;; |
| 168 | } |
| 169 | CHECK(self->GetHeldMutex(level) == this) << "Unlocking on unacquired mutex: " << name_; |
| 170 | } |
| 171 | self->SetHeldMutex(level, nullptr); |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 172 | } |
| 173 | } |
| 174 | |
// Acquires the lock in shared (reader) mode, blocking while a writer holds it.
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Negative state_: a writer holds the lock; wait until it is released.
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  // Shared mode has no single exclusive owner (0 = none, -1 = shared).
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
| 195 | |
// Releases a shared (reader) hold; the last reader wakes any blocked writers.
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
      done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_contenders_.load(std::memory_order_seq_cst) > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
        }
      }
    } else {
      // state_ <= 0 on a shared unlock means the lock was not held shared.
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
| 223 | |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 224 | inline bool Mutex::IsExclusiveHeld(const Thread* self) const { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 225 | DCHECK(self == nullptr || self == Thread::Current()); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 226 | bool result = (GetExclusiveOwnerTid() == SafeGetTid(self)); |
| 227 | if (kDebugLocking) { |
| 228 | // Sanity debug check that if we think it is locked we have it in our held mutexes. |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 229 | if (result && self != nullptr && level_ != kMonitorLock && !gAborting) { |
Charles Munger | c665d63 | 2018-11-06 16:20:13 +0000 | [diff] [blame] | 230 | if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) { |
| 231 | CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this); |
| 232 | } else { |
| 233 | CHECK_EQ(self->GetHeldMutex(level_), this); |
| 234 | } |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 235 | } |
| 236 | } |
| 237 | return result; |
| 238 | } |
| 239 | |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 240 | inline pid_t Mutex::GetExclusiveOwnerTid() const { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 241 | return exclusive_owner_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 242 | } |
| 243 | |
Andreas Gampe | b486a98 | 2017-06-01 13:45:54 -0700 | [diff] [blame] | 244 | inline void Mutex::AssertExclusiveHeld(const Thread* self) const { |
| 245 | if (kDebugLocking && (gAborting == 0)) { |
| 246 | CHECK(IsExclusiveHeld(self)) << *this; |
| 247 | } |
| 248 | } |
| 249 | |
// For a plain (exclusive-only) mutex, "held" and "exclusively held" coincide.
inline void Mutex::AssertHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}
| 253 | |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 254 | inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 255 | DCHECK(self == nullptr || self == Thread::Current()); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 256 | bool result = (GetExclusiveOwnerTid() == SafeGetTid(self)); |
| 257 | if (kDebugLocking) { |
| 258 | // Sanity that if the pthread thinks we own the lock the Thread agrees. |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 259 | if (self != nullptr && result) { |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 260 | CHECK_EQ(self->GetHeldMutex(level_), this); |
| 261 | } |
| 262 | } |
| 263 | return result; |
| 264 | } |
| 265 | |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 266 | inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const { |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 267 | #if ART_USE_FUTEXES |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 268 | int32_t state = state_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 269 | if (state == 0) { |
| 270 | return 0; // No owner. |
| 271 | } else if (state > 0) { |
| 272 | return -1; // Shared. |
| 273 | } else { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 274 | return exclusive_owner_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 275 | } |
| 276 | #else |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 277 | return exclusive_owner_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 278 | #endif |
| 279 | } |
| 280 | |
Andreas Gampe | b486a98 | 2017-06-01 13:45:54 -0700 | [diff] [blame] | 281 | inline void ReaderWriterMutex::AssertExclusiveHeld(const Thread* self) const { |
| 282 | if (kDebugLocking && (gAborting == 0)) { |
| 283 | CHECK(IsExclusiveHeld(self)) << *this; |
| 284 | } |
| 285 | } |
| 286 | |
// "Writer" and "exclusive holder" are the same thing for a reader-writer lock.
inline void ReaderWriterMutex::AssertWriterHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}
| 290 | |
// Bookkeeping for the runnable->suspended transition: asserts the shared hold
// then removes it from the thread's held-mutex records. Note that no actual
// SharedUnlock happens here; the lock-state transition is handled elsewhere.
inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
}
| 295 | |
// Bookkeeping for the suspended->runnable transition: records the shared hold
// in the thread's held-mutex table, then asserts it. Note that no actual
// SharedLock happens here; the lock-state transition is handled elsewhere.
inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
| 300 | |
// RAII guard: acquires |mu| in shared (reader) mode for this object's lifetime.
inline ReaderMutexLock::ReaderMutexLock(Thread* self, ReaderWriterMutex& mu)
    : self_(self), mu_(mu) {
  mu_.SharedLock(self_);
}
| 305 | |
// Releases the shared hold acquired in the constructor.
inline ReaderMutexLock::~ReaderMutexLock() {
  mu_.SharedUnlock(self_);
}
| 309 | |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 310 | } // namespace art |
| 311 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 312 | #endif // ART_RUNTIME_BASE_MUTEX_INL_H_ |