/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 *  David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking
         * that lock->owner still matches owner. If that fails, owner might
         * point to free()d memory; if it still matches, the rcu_read_lock()
         * ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign of heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner prematurely:
         * the slow path will always be taken, and it clears the owner field
         * after verifying that it was indeed current.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
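
/*
 * Illustrative usage sketch (not part of this file's code): a typical
 * caller pairs mutex_lock()/mutex_unlock() around a critical section in
 * process context. DEFINE_MUTEX(), LIST_HEAD() and list_add_tail() are
 * the real APIs; "my_lock", "my_list" and my_add_item() are made-up
 * names for the example only.
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static LIST_HEAD(my_list);
 *
 *	void my_add_item(struct my_item *item)
 *	{
 *		mutex_lock(&my_lock);
 *		list_add_tail(&item->node, &my_list);
 *		mutex_unlock(&my_lock);
 *	}
 */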

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         */

        for (;;) {
                struct task_struct *owner;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        preempt_enable();
                        return 0;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (atomic_xchg(&lock->count, -1) == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                if (atomic_xchg(&lock->count, -1) == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
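
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * mutex_lock_nested() is for the rare case where one task legitimately
 * holds two mutexes of the same lockdep class, e.g. a parent and a child
 * object of the same type, and lockdep would otherwise report a false
 * deadlock. SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>; the
 * "my_node" structure and the parent-before-child ordering rule are
 * assumptions made for the example.
 *
 *	void my_reparent(struct my_node *parent, struct my_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */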

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
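
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * driver read/ioctl path typically propagates the failure instead of
 * blocking uninterruptibly, commonly as -ERESTARTSYS so the syscall can
 * be restarted after the signal is handled. "my_dev" and my_read() are
 * made-up names.
 *
 *	static ssize_t my_read(struct my_dev *dev, char __user *buf, size_t len)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		... copy data out under the lock ...
 *		mutex_unlock(&dev->lock);
 *		return len;
 *	}
 */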

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);
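
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * because mutex_trylock() returns 1 on success, the usual pattern is
 * "if we got the lock, do the optional work, otherwise skip or defer";
 * note this is the opposite of the down_trylock() convention. "my_dev"
 * and my_flush_locked() are made-up names.
 *
 *	void my_try_flush(struct my_dev *dev)
 *	{
 *		if (mutex_trylock(&dev->lock)) {
 *			my_flush_locked(dev);
 *			mutex_unlock(&dev->lock);
 *		}
 *		... otherwise another task holds the lock; try again later ...
 *	}
 */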

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
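
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * typical user is a "put" path where only the final reference drop needs
 * the mutex held, e.g. to unlink and free the object. "my_obj",
 * "my_table_lock" and my_put() are made-up names.
 *
 *	void my_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_table_lock))
 *			return;
 *		list_del(&obj->node);
 *		mutex_unlock(&my_table_lock);
 *		kfree(obj);
 *	}
 */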