/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
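
/*
 * Note on the lock->count encoding used by the fast and slow paths
 * below (derived from the transitions in this file):
 *
 *	 1: unlocked
 *	 0: locked, no waiters
 *	-1: locked, possibly with waiters queued on ->wait_list
 */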

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

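/*
 * Example: a mutex is either defined statically or initialized at run
 * time before first use. A minimal sketch; struct my_driver_data and
 * my_probe() are hypothetical, not part of this file:
 *
 *	static DEFINE_MUTEX(my_static_lock);
 *
 *	struct my_driver_data {
 *		struct mutex lock;
 *	};
 *
 *	static int my_probe(struct my_driver_data *d)
 *	{
 *		mutex_init(&d->lock);
 *		return 0;
 *	}
 */
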
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
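
/*
 * Example: the canonical critical-section pattern. A minimal sketch;
 * count_lock and shared_count are hypothetical:
 *
 *	static DEFINE_MUTEX(count_lock);
 *	static int shared_count;
 *
 *	static void bump_count(void)
 *	{
 *		mutex_lock(&count_lock);
 *		shared_count++;
 *		mutex_unlock(&count_lock);
 *	}
 */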

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here:
	 * the slow path is always taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
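
/*
 * Example: the lock must be dropped on every return path; the usual
 * idiom funnels errors through a single unlock. A minimal sketch;
 * struct my_driver_data, its ->ready flag and do_work() are
 * hypothetical:
 *
 *	static int guarded_op(struct my_driver_data *d)
 *	{
 *		int ret = 0;
 *
 *		mutex_lock(&d->lock);
 *		if (!d->ready) {
 *			ret = -ENODEV;
 *			goto out_unlock;
 *		}
 *		ret = do_work(d);
 *	out_unlock:
 *		mutex_unlock(&d->lock);
 *		return ret;
 *	}
 */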

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct thread_info *owner;

		/*
		 * If we own the BKL, then don't spin. The owner of
		 * the mutex might be waiting on us to release the BKL.
		 */
		if (unlikely(current->lock_depth >= 0))
			break;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, we will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
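
/*
 * Example: lockdep folds all mutexes initialized at the same site into
 * one class, so taking two instances of that class at once needs a
 * subclass annotation to suppress a false deadlock report. A minimal
 * sketch; the parent/child objects are hypothetical:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */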

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
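
/*
 * Example: an interruptible acquisition typically propagates a restart
 * back toward user space rather than blocking a signalled task. A
 * minimal sketch; my_dev_ioctl() and struct my_driver_data are
 * hypothetical:
 *
 *	static long my_dev_ioctl(struct my_driver_data *d)
 *	{
 *		if (mutex_lock_interruptible(&d->lock))
 *			return -ERESTARTSYS;
 *		...
 *		mutex_unlock(&d->lock);
 *		return 0;
 *	}
 */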

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);
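
/*
 * Example: the killable variant only gives up for fatal signals, which
 * suits paths that should not abort on ordinary signals but must still
 * let a SIGKILLed task exit. A minimal sketch:
 *
 *	if (mutex_lock_killable(&d->lock))
 *		return -EINTR;
 */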

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
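
/*
 * Example: per the spin_trylock() convention above, nonzero means the
 * lock was taken - the opposite of down_trylock(). A minimal sketch,
 * returning -EBUSY to the (hypothetical) caller on contention:
 *
 *	if (!mutex_trylock(&d->lock))
 *		return -EBUSY;
 *	...
 *	mutex_unlock(&d->lock);
 */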

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we decremented @cnt to 0, return
 * false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
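
/*
 * Example: the classic use is dropping the last reference under the
 * lock that protects the object's visibility, so no new reference can
 * be taken between the 0-transition and teardown. A minimal sketch;
 * obj, its list linkage and obj_list_lock are hypothetical:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */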