blob: 5daead99015d8bd35de0e0ba6a332ac7b3ad53f6 [file] [log] [blame]
Ian Rogers693ff612013-02-01 10:56:12 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
18#define ART_RUNTIME_BASE_MUTEX_INL_H_
Ian Rogers693ff612013-02-01 10:56:12 -080019
Ian Rogers220228e2014-01-23 09:08:16 -080020#include <inttypes.h>
21
Ian Rogers693ff612013-02-01 10:56:12 -080022#include "mutex.h"
23
David Sehrc431b9d2018-03-02 12:01:51 -080024#include "base/utils.h"
Ian Rogerscf7f1912014-10-22 22:06:39 -070025#include "base/value_object.h"
Ian Rogers693ff612013-02-01 10:56:12 -080026#include "thread.h"
27
Ian Rogers693ff612013-02-01 10:56:12 -080028#if ART_USE_FUTEXES
29#include "linux/futex.h"
30#include "sys/syscall.h"
31#ifndef SYS_futex
32#define SYS_futex __NR_futex
33#endif
Chih-Hung Hsieh729c1cf2014-11-06 10:49:16 -080034#endif // ART_USE_FUTEXES
35
Ian Rogersd6d7c3b2014-11-06 14:26:29 -080036#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
37
Chih-Hung Hsieh729c1cf2014-11-06 10:49:16 -080038namespace art {
39
#if ART_USE_FUTEXES
// Thin wrapper around the futex(2) system call. There is no libc entry point for futex, so it
// must be invoked through syscall(2); returns the raw syscall result (-1 with errno on failure).
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES
46
Hans Boehm0882af22017-08-31 15:21:57 -070047// The following isn't strictly necessary, but we want updates on Atomic<pid_t> to be lock-free.
48// TODO: Use std::atomic::is_always_lock_free after switching to C++17 atomics.
49static_assert(sizeof(pid_t) <= sizeof(int32_t), "pid_t should fit in 32 bits");
50
51static inline pid_t SafeGetTid(const Thread* self) {
Mathieu Chartier2cebb242015-04-21 16:50:40 -070052 if (self != nullptr) {
Hans Boehm0882af22017-08-31 15:21:57 -070053 return self->GetTid();
Ian Rogers693ff612013-02-01 10:56:12 -080054 } else {
Hans Boehm0882af22017-08-31 15:21:57 -070055 return GetTid();
Ian Rogers693ff612013-02-01 10:56:12 -080056 }
57}
58
// Sanity check run when a lock is acquired/released with no attached Thread* (self == null).
// Only the lock levels enumerated below are expected in that situation; anything else aborts
// (unless aborting is currently unsafe). NO_THREAD_SAFETY_ANALYSIS because this deliberately
// inspects lock state without the analysis-visible locks held.
static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    CHECK(!Locks::IsSafeToCallAbortRacy() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock, during this process they
          // no longer exist and so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // When transitioning from suspended to runnable, a daemon thread might be in
          // a situation where the runtime is shutting down. To not crash our debug locking
          // mechanism we just pass null Thread* to the MutexLock during that transition
          // (see Thread::TransitionFromSuspendedToRunnable).
          level == kThreadSuspendCountLock ||
          // Avoid recursive death.
          level == kAbortLock ||
          // Locks at the absolute top of the stack can be locked at any time.
          level == kTopLockLevel) << level;
  }
}
88
Ian Rogersb6c31ea2013-02-04 18:11:33 -080089inline void BaseMutex::RegisterAsLocked(Thread* self) {
Mathieu Chartier2cebb242015-04-21 16:50:40 -070090 if (UNLIKELY(self == nullptr)) {
Ian Rogersb6c31ea2013-02-04 18:11:33 -080091 CheckUnattachedThread(level_);
92 return;
93 }
Charles Mungerc665d632018-11-06 16:20:13 +000094 LockLevel level = level_;
95 // It would be nice to avoid this condition checking in the non-debug case,
96 // but that would make the various methods that check if a mutex is held not
97 // work properly for thread wait locks. Since the vast majority of lock
98 // acquisitions are not thread wait locks, this check should not be too
99 // expensive.
100 if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
101 level = kThreadWaitWakeLock;
102 }
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800103 if (kDebugLocking) {
104 // Check if a bad Mutex of this level or lower is held.
105 bool bad_mutexes_held = false;
Alex Lightb284f8d2017-11-21 00:00:48 +0000106 // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the
107 // mutator_lock_ exclusive. This is because we suspending when holding locks at this level is
108 // not allowed and if we hold the mutator_lock_ exclusive we must unsuspend stuff eventually
109 // so there are no deadlocks.
Charles Mungerc665d632018-11-06 16:20:13 +0000110 if (level == kTopLockLevel &&
Alex Lightb284f8d2017-11-21 00:00:48 +0000111 Locks::mutator_lock_->IsSharedHeld(self) &&
112 !Locks::mutator_lock_->IsExclusiveHeld(self)) {
113 LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
114 << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
115 << ") non-exclusive while locking \"" << name_ << "\" "
Charles Mungerc665d632018-11-06 16:20:13 +0000116 << "(level " << level << " - " << static_cast<int>(level) << ") a top level"
Alex Lightb284f8d2017-11-21 00:00:48 +0000117 << "mutex. This is not allowed.";
118 bad_mutexes_held = true;
119 } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
120 LOG(ERROR) << "Lock level violation. Locking mutator_lock_ while already having a "
121 << "kTopLevelLock (" << self->GetHeldMutex(kTopLockLevel)->name_ << "held is "
122 << "not allowed.";
123 bad_mutexes_held = true;
124 }
Charles Mungerc665d632018-11-06 16:20:13 +0000125 for (int i = level; i >= 0; --i) {
Andreas Gamped1fbcff2017-04-17 21:40:28 -0700126 LockLevel lock_level_i = static_cast<LockLevel>(i);
127 BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
Charles Mungerc665d632018-11-06 16:20:13 +0000128 if (level == kTopLockLevel &&
Alex Lightb284f8d2017-11-21 00:00:48 +0000129 lock_level_i == kMutatorLock &&
130 Locks::mutator_lock_->IsExclusiveHeld(self)) {
131 // This is checked above.
132 continue;
133 } else if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) {
Elliott Hughes0f827162013-02-26 12:12:58 -0800134 LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
Andreas Gamped1fbcff2017-04-17 21:40:28 -0700135 << "(level " << lock_level_i << " - " << i
Ian Rogers62d6c772013-02-27 08:32:07 -0800136 << ") while locking \"" << name_ << "\" "
Charles Mungerc665d632018-11-06 16:20:13 +0000137 << "(level " << level << " - " << static_cast<int>(level) << ")";
Andreas Gamped1fbcff2017-04-17 21:40:28 -0700138 if (lock_level_i > kAbortLock) {
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800139 // Only abort in the check below if this is more than abort level lock.
140 bad_mutexes_held = true;
141 }
142 }
143 }
Nicolas Geoffraydb978712014-12-09 13:33:38 +0000144 if (gAborting == 0) { // Avoid recursive aborts.
145 CHECK(!bad_mutexes_held);
146 }
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800147 }
148 // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
149 // the monitor list.
Charles Mungerc665d632018-11-06 16:20:13 +0000150 if (level != kMonitorLock) {
151 self->SetHeldMutex(level, this);
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800152 }
153}
154
Ian Rogers693ff612013-02-01 10:56:12 -0800155inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700156 if (UNLIKELY(self == nullptr)) {
Ian Rogers693ff612013-02-01 10:56:12 -0800157 CheckUnattachedThread(level_);
158 return;
159 }
160 if (level_ != kMonitorLock) {
Charles Mungerc665d632018-11-06 16:20:13 +0000161 auto level = level_;
162 if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitWakeLock) == this) {
163 level = kThreadWaitWakeLock;
Charles Munger1ebb52c2018-10-25 15:37:14 -0700164 }
Charles Mungerc665d632018-11-06 16:20:13 +0000165 if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
166 if (level == kThreadWaitWakeLock) {
167 CHECK(self->GetHeldMutex(kThreadWaitLock) != nullptr) << "Held " << kThreadWaitWakeLock << " without " << kThreadWaitLock;;
168 }
169 CHECK(self->GetHeldMutex(level) == this) << "Unlocking on unacquired mutex: " << name_;
170 }
171 self->SetHeldMutex(level, nullptr);
Ian Rogers693ff612013-02-01 10:56:12 -0800172 }
173}
174
// Acquires a shared (reader) hold. On futex builds: CAS-increments the reader count while no
// writer holds the lock (state_ >= 0), otherwise blocks in HandleSharedLockContention and
// retries. On pthread builds: plain rwlock read lock.
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      // A writer holds the lock (state_ < 0); wait for release, then loop and retry the CAS.
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  // While shared-held the owner tid reads as 0 (free at sample time) or -1 (shared);
  // see ReaderWriterMutex::GetExclusiveOwnerTid.
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
195
// Releases a shared (reader) hold. On futex builds: CAS-decrements the reader count and, when
// the last reader leaves, wakes any pending writers/readers via futex. On pthread builds:
// plain rwlock unlock.
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note, the relaxed loads below mustn't reorder before the CompareAndSet.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_pending_writers_.load(std::memory_order_seq_cst) > 0 ||
            num_pending_readers_.load(std::memory_order_seq_cst) > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
        }
      }
    } else {
      // state_ <= 0 here means we were not actually shared-held: bookkeeping corruption.
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
226
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700227inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700228 DCHECK(self == nullptr || self == Thread::Current());
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700229 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
230 if (kDebugLocking) {
231 // Sanity debug check that if we think it is locked we have it in our held mutexes.
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700232 if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
Charles Mungerc665d632018-11-06 16:20:13 +0000233 if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) {
234 CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this);
235 } else {
236 CHECK_EQ(self->GetHeldMutex(level_), this);
237 }
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700238 }
239 }
240 return result;
241}
242
// Returns the tid recorded for the current exclusive owner. Relaxed load: callers compare the
// value against a tid for identity checks, so no ordering with other memory is required here.
inline pid_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_.load(std::memory_order_relaxed);
}
246
Andreas Gampeb486a982017-06-01 13:45:54 -0700247inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
248 if (kDebugLocking && (gAborting == 0)) {
249 CHECK(IsExclusiveHeld(self)) << *this;
250 }
251}
252
// A plain Mutex can only be held exclusively, so AssertHeld is an alias for AssertExclusiveHeld.
inline void Mutex::AssertHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}
256
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700257inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700258 DCHECK(self == nullptr || self == Thread::Current());
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700259 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
260 if (kDebugLocking) {
261 // Sanity that if the pthread thinks we own the lock the Thread agrees.
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700262 if (self != nullptr && result) {
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700263 CHECK_EQ(self->GetHeldMutex(level_), this);
264 }
265 }
266 return result;
267}
268
// Returns the exclusive owner's tid, 0 when unowned, or -1 when held shared (futex build:
// derived from state_, where 0 = free, >0 = reader count, <0 = writer). The pthread build
// cannot observe shared holds and simply reports the recorded owner.
inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_.load(std::memory_order_relaxed);
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_.load(std::memory_order_relaxed);
  }
#else
  return exclusive_owner_.load(std::memory_order_relaxed);
#endif
}
283
Andreas Gampeb486a982017-06-01 13:45:54 -0700284inline void ReaderWriterMutex::AssertExclusiveHeld(const Thread* self) const {
285 if (kDebugLocking && (gAborting == 0)) {
286 CHECK(IsExclusiveHeld(self)) << *this;
287 }
288}
289
// The writer hold on a ReaderWriterMutex is the exclusive hold, so this simply delegates.
inline void ReaderWriterMutex::AssertWriterHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}
293
// Drops the held-mutex bookkeeping for the shared (runnable) hold as |self| suspends.
// Note this only updates bookkeeping (RegisterAsUnlocked); the actual lock word is managed
// elsewhere as part of the thread state transition.
inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
}
298
// Re-establishes the held-mutex bookkeeping for the shared hold as |self| becomes runnable.
// Bookkeeping only (RegisterAsLocked); the lock word itself is handled by the state transition
// (see Thread::TransitionFromSuspendedToRunnable).
inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
303
// Scoped reader lock: takes a shared hold on |mu| for the lifetime of this object.
inline ReaderMutexLock::ReaderMutexLock(Thread* self, ReaderWriterMutex& mu)
    : self_(self), mu_(mu) {
  mu_.SharedLock(self_);
}
308
// Releases the shared hold acquired by the constructor.
inline ReaderMutexLock::~ReaderMutexLock() {
  mu_.SharedUnlock(self_);
}
312
Ian Rogers693ff612013-02-01 10:56:12 -0800313} // namespace art
314
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700315#endif // ART_RUNTIME_BASE_MUTEX_INL_H_