/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
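
/*
 * Illustrative usage (a minimal sketch, not part of this file): a plain
 * critical section built on mutex_lock()/mutex_unlock(). The 'demo_lock'
 * and 'demo_count' names are hypothetical:
 *
 *	static DEFINE_MUTEX(demo_lock);
 *	static int demo_count;
 *
 *	static void demo_inc(void)
 *	{
 *		mutex_lock(&demo_lock);
 *		demo_count++;
 *		mutex_unlock(&demo_lock);
 *	}
 */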
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
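
/*
 * Illustrative handoff sequence (an assumed scenario, not code in this
 * file): with spinners A and B, A's xchg() sees NULL, so A holds the MCS
 * lock and spins on the mutex owner. B's xchg() returns A's node, so B
 * links itself behind A and spins on its own 'locked' field. When A calls
 * mspin_unlock(), it finds B as 'next' and sets B->locked = 1, handing
 * the spin slot to B without B ever touching the shared 'lock' pointer
 * again.
 */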

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex has either been released, or
	 * the new owner has just acquired it and not yet set the owner field.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
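
/*
 * Illustrative wait/wound usage (a sketch, not part of this file): taking
 * two ww_mutexes under one acquire context and backing off on -EDEADLK.
 * The 'demo_class' and demo_lock_pair() names are hypothetical; note that
 * the first lock taken in a context never returns -EDEADLK:
 *
 *	static DEFINE_WW_CLASS(demo_class);
 *
 *	static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		struct ww_mutex *first = a, *second = b;
 *
 *		ww_acquire_init(&ctx, &demo_class);
 *		ww_mutex_lock(first, &ctx);
 *		while (ww_mutex_lock(second, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(first);
 *			ww_mutex_lock_slow(second, &ctx);
 *			swap(first, second);
 *		}
 *		ww_acquire_done(&ctx);
 *		... critical section covering both objects ...
 *		ww_mutex_unlock(a);
 *		ww_mutex_unlock(b);
 *		ww_acquire_fini(&ctx);
 *	}
 */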

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	/*
	 * Back off (wound ourselves) if our context is younger than the
	 * holder's; the subtraction keeps the stamp comparison correct
	 * across counter wraparound, and the context pointer breaks ties.
	 */
	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or after losing out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using the MCS lock so that only
	 * one spinner can compete for the mutex. However, if mutex spinning
	 * isn't going to happen, there is no point in going through the
	 * lock/unlock overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set, its contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (!__builtin_constant_p(ww_ctx == NULL)) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	if (!__builtin_constant_p(ww_ctx == NULL)) {
		struct ww_mutex *ww = container_of(lock,
						   struct ww_mutex,
						   base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */

		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
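
/*
 * Illustrative lockdep annotation (a sketch, not part of this file): when
 * two mutexes of the same lock class must nest, the inner acquisition is
 * annotated with a subclass so lockdep does not report it as a
 * self-deadlock. The 'parent'/'child' structures are hypothetical:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	... both objects are now stable ...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */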

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			/* grow the injection interval by roughly 3.5x */
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);
	if (!ret && ctx->acquired > 0)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);

	if (!ret && ctx->acquired > 0)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
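
/*
 * Illustrative usage (a sketch, not part of this file): propagating the
 * -EINTR from mutex_lock_interruptible() so a signal can abort the wait.
 * 'demo_lock' and demo_op() are hypothetical:
 *
 *	static int demo_op(void)
 *	{
 *		int ret = mutex_lock_interruptible(&demo_lock);
 *
 *		if (ret)
 *			return ret;
 *		... critical section ...
 *		mutex_unlock(&demo_lock);
 *		return 0;
 *	}
 */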

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
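
/*
 * Illustrative usage (a sketch, not part of this file): the lock is held
 * only on a nonzero return, the opposite of down_trylock(). 'demo_lock'
 * is a hypothetical DEFINE_MUTEX() lock:
 *
 *	if (mutex_trylock(&demo_lock)) {
 *		... lock acquired without sleeping, do the work ...
 *		mutex_unlock(&demo_lock);
 *	} else {
 *		... contended: defer or fall back, do not touch the data ...
 *	}
 */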

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0; return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
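
/*
 * Illustrative usage (a sketch, not part of this file): dropping a
 * reference where the final put must tear the object down under a mutex.
 * The 'demo_*' names are hypothetical:
 *
 *	if (atomic_dec_and_mutex_lock(&demo->refcount, &demo_lock)) {
 *		... refcount reached 0 and demo_lock is held ...
 *		demo_destroy(demo);
 *		mutex_unlock(&demo_lock);
 *	}
 */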