/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A mutex count of -1 indicates that waiters are sleeping waiting for the
 * mutex. Some architectures can allow any negative number, not just -1, for
 * this purpose.
 */
#ifdef __ARCH_ALLOW_ANY_NEGATIVE_MUTEX_COUNT
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
#else
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) != -1)
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
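
/*
 * A minimal usage sketch (illustrative only; my_lock and my_count are
 * hypothetical names, not part of this file): a mutex protecting a
 * shared counter, locked and unlocked by the same task as required
 * by the rules documented above.
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static unsigned long my_count;
 *
 *	static void my_count_inc(void)
 *	{
 *		mutex_lock(&my_lock);
 *		my_count++;
 *		mutex_unlock(&my_lock);
 *	}
 */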

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the
 * mutex more or less simultaneously, the spinners need to acquire an MCS
 * lock first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}

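/*
 * A minimal sketch of how the MCS queue above is used (illustrative;
 * mirrors the spin loop further down): each spinner enqueues a node that
 * lives on its own stack, so the queue needs no allocations and each CPU
 * spins on its own cacheline.
 *
 *	struct mspin_node node;
 *
 *	mspin_lock(MLOCK(lock), &node);
 *	... spin on lock->owner ...
 *	mspin_unlock(MLOCK(lock), &node);
 */
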
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired
	 * the mutex and not set the field yet, or the mutex may have
	 * been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here;
	 * the slow path will always be taken, and it clears the owner
	 * field after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using an MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted the owner
		 * between it acquiring the lock and setting the owner field.
		 * If we're an RT task, we will live-lock, because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
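
/*
 * A minimal sketch of when the subclass matters (illustrative only;
 * 'parent' and 'child' are hypothetical objects of the same type): taking
 * two locks of the same lock class needs distinct lockdep subclasses,
 * otherwise lockdep would flag the second acquisition as recursive.
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */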

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(): sleep until the mutex becomes
 * available and return 0 once it has been acquired. If a signal
 * arrives while waiting for the lock, this function returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
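
/*
 * A minimal caller sketch (illustrative; my_lock and my_op() are
 * hypothetical) showing that the -EINTR return must be propagated
 * rather than ignored:
 *
 *	static int my_op(void)
 *	{
 *		int ret = mutex_lock_interruptible(&my_lock);
 *
 *		if (ret)
 *			return ret;	(a signal arrived; no lock held)
 *		...
 *		mutex_unlock(&my_lock);
 *		return 0;
 *	}
 */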

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 571 | |
| 572 | /* |
| 573 | * Spinlock based trylock, we take the spinlock and check whether we |
| 574 | * can get the lock: |
| 575 | */ |
| 576 | static inline int __mutex_trylock_slowpath(atomic_t *lock_count) |
| 577 | { |
| 578 | struct mutex *lock = container_of(lock_count, struct mutex, count); |
Ingo Molnar | 1fb00c6 | 2006-06-26 00:24:31 -0700 | [diff] [blame] | 579 | unsigned long flags; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 580 | int prev; |
| 581 | |
Ingo Molnar | 1fb00c6 | 2006-06-26 00:24:31 -0700 | [diff] [blame] | 582 | spin_lock_mutex(&lock->wait_lock, flags); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 583 | |
| 584 | prev = atomic_xchg(&lock->count, -1); |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 585 | if (likely(prev == 1)) { |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 586 | mutex_set_owner(lock); |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 587 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
| 588 | } |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 589 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 590 | /* Set it back to 0 if there are no waiters: */ |
| 591 | if (likely(list_empty(&lock->wait_list))) |
| 592 | atomic_set(&lock->count, 0); |
| 593 | |
Ingo Molnar | 1fb00c6 | 2006-06-26 00:24:31 -0700 | [diff] [blame] | 594 | spin_unlock_mutex(&lock->wait_lock, flags); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 595 | |
| 596 | return prev == 1; |
| 597 | } |

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
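
/*
 * A short sketch of the spin_trylock()-style convention noted above
 * (illustrative; my_lock is hypothetical): success is nonzero, so the
 * test reads the opposite way from down_trylock():
 *
 *	if (mutex_trylock(&my_lock)) {
 *		...			(got the lock)
 *		mutex_unlock(&my_lock);
 *	} else {
 *		...			(contended; we did not block)
 *	}
 */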

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true, holding the lock, if we decremented the counter to 0;
 * return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
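
/*
 * A minimal refcount-style usage sketch (illustrative; my_refcount,
 * my_lock and my_teardown() are hypothetical): the final reference
 * holder tears the object down with the mutex already held, so the
 * decrement-to-zero and the teardown are atomic with respect to other
 * holders of the lock.
 *
 *	if (atomic_dec_and_mutex_lock(&my_refcount, &my_lock)) {
 *		my_teardown();
 *		mutex_unlock(&my_lock);
 *	}
 */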