/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
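
/*
 * The lock state lives in lock->count: 1 means unlocked, 0 means locked
 * with no waiters, and a negative value means locked with (possibly)
 * waiters queued on lock->wait_list. The fastpaths below only perform
 * the uncontended 1->0 (lock) and 0->1 (unlock) transitions; every other
 * case falls back to the slowpaths, which serialize on lock->wait_lock.
 */
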
/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
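
/*
 * Illustrative usage sketch (not part of this file): a mutex is either
 * defined statically with DEFINE_MUTEX() or initialized at runtime with
 * mutex_init() before first use. The "my_device"/"buf_lock" names below
 * are hypothetical, made up for the example.
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_device {
 *		struct mutex buf_lock;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		mutex_init(&dev->buf_lock);
 *	}
 */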

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);
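
/*
 * For reference, on architectures that use the generic atomic-decrement
 * fastpath (asm-generic/mutex-dec.h), __mutex_fastpath_lock() is roughly
 * the sketch below. The real code is per-architecture (often hand-written
 * assembly), so treat this only as an approximation of the 1->0 fastpath:
 *
 *	static inline void
 *	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *	{
 *		if (unlikely(atomic_dec_return(count) < 0))
 *			fail_fn(count);
 *	}
 */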

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
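
/*
 * Illustrative usage sketch: the usual pattern is to hold the mutex
 * around a (possibly sleeping) critical section and release it from the
 * same task. "my_dev", "buf_lock" and "update_buffer()" are hypothetical
 * names used only for the example.
 *
 *	mutex_lock(&my_dev->buf_lock);
 *	update_buffer(my_dev);
 *	mutex_unlock(&my_dev->buf_lock);
 */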

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely((state == TASK_INTERRUPTIBLE &&
					signal_pending(task)) ||
			     (state == TASK_KILLABLE &&
					fatal_signal_pending(task)))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
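
/*
 * Illustrative lockdep-annotation sketch: when two mutexes of the same
 * lock class are nested in a fixed order (for example a hypothetical
 * parent and child object of the same type), the inner acquisition can be
 * annotated with a subclass such as SINGLE_DEPTH_NESTING so that lockdep
 * does not flag it as a self-deadlock. "parent" and "child" are made-up
 * names for the example:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */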

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
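
/*
 * Illustrative usage sketch: callers typically propagate the failure up
 * (for example as -ERESTARTSYS from a file operation) rather than loop.
 * "my_dev" and "buf_lock" are hypothetical names:
 *
 *	if (mutex_lock_interruptible(&my_dev->buf_lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&my_dev->buf_lock);
 */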

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);
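
/*
 * mutex_lock_killable() behaves like mutex_lock_interruptible(), except
 * that only a fatal signal (such as SIGKILL) interrupts the wait, as the
 * TASK_KILLABLE/fatal_signal_pending() check above shows. An illustrative
 * sketch, again with hypothetical names:
 *
 *	if (mutex_lock_killable(&my_dev->buf_lock))
 *		return -EINTR;
 */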

static noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
			__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
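
/*
 * Illustrative usage sketch: because of the spin_trylock()-style return
 * convention, a failed attempt returns 0 and the caller must not assume
 * it holds the lock. Hypothetical names again:
 *
 *	if (!mutex_trylock(&my_dev->buf_lock))
 *		return -EBUSY;
 *	update_buffer(my_dev);
 *	mutex_unlock(&my_dev->buf_lock);
 */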