/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner	bit1	bit0
 * NULL		0	0	lock is free (fast acquire possible)
 * NULL		0	1	invalid state
 * NULL		1	0	transitional state*
 * NULL		1	1	invalid state
 * taskpointer	0	0	lock is held (fast release possible)
 * taskpointer	0	1	task is pending owner
 * taskpointer	1	0	lock is held and has waiters
 * taskpointer	1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set
 * this bit before looking at the lock, hence the reason this is a
 * transitional state.
 */

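/*
 * Illustrative sketch only (unused below; the helper name is made up
 * for illustration): how the owner task pointer is recovered from the
 * encoded word. RT_MUTEX_OWNER_PENDING (bit 0) and RT_MUTEX_HAS_WAITERS
 * (bit 1) are the masks used throughout this file; the real accessor
 * is rt_mutex_owner() in rtmutex_common.h.
 */
static inline struct task_struct *rt_mutex_owner_sketch(struct rt_mutex *lock)
{
	unsigned long val = (unsigned long)lock->owner;

	/* Mask off both state bits to get the task_struct pointer back: */
	return (struct task_struct *)
		(val & ~(RT_MUTEX_OWNER_PENDING | RT_MUTEX_HAS_WAITERS));
}
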
void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting. Note that a
 * lower numerical value means a higher priority, hence the min().
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of the task. We do not use the spin_xx_mutex() variants here as we
 * are outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

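/*
 * Worked example (illustrative): if task A blocks on lock L1, which is
 * held by B, while B is itself blocked on L2, which is held by C, then
 * A's priority has to propagate along A -> L1 -> B -> L2 -> C. Each
 * iteration of the walk below handles one (task, lock) step of such a
 * chain, holding at most the task->pi_lock and lock->wait_lock of that
 * step, so the walk stays preemptible.
 */
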
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
int rt_mutex_adjust_prio_chain(struct task_struct *task,
			       int deadlock_detect,
			       struct rt_mutex *orig_lock,
			       struct rt_mutex_waiter *orig_waiter,
			       struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, top_task->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * The task cannot go away as we did a get_task_struct() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock
	 * and made us the pending owner:
	 */
	if (orig_waiter && !orig_waiter->task)
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that
	 * top_waiter can be NULL when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owner's
	 * pi_waiters list. Remove it and readjust the pending
	 * owner's priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owner's pi_waiters queue. So
	 * we have to enqueue this waiter into the
	 * current->pi_waiters list. This covers the case where
	 * current is boosted because it holds another lock and
	 * gets unboosted because the booster is interrupted, so
	 * we would delay a waiter with a higher priority than
	 * current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}

/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 * - no other waiter is on the lock
	 * - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * anymore. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}

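/*
 * Illustrative sketch of the cmpxchg loop that the comment in
 * try_to_take_rt_mutex() refers to. The real mark_rt_mutex_waiters()
 * is provided together with rt_mutex_cmpxchg(); this unused copy only
 * shows the technique and assumes an architecture with cmpxchg():
 */
static inline void mark_rt_mutex_waiters_sketch(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	/*
	 * Re-read the owner word and retry until the cmpxchg succeeds,
	 * so the WAITERS bit gets set without losing a concurrent
	 * owner update:
	 */
	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
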
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 current);

	spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that, in case the pending owner gets unboosted, a
	 * waiter with a higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
void remove_waiter(struct rt_mutex *lock,
		   struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Set up the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_MODE_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */

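/*
 * For reference (illustrative, not the verbatim definition): with a
 * cmpxchg-capable architecture and !CONFIG_DEBUG_RT_MUTEXES the helper
 * used below amounts to roughly
 *
 *	#define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
 *
 * while otherwise rt_mutex_cmpxchg() evaluates to 0, so every fastpath
 * below falls through to its slow function.
 */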
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

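/*
 * Minimal usage sketch (illustrative, not part of this file), with
 * DEFINE_RT_MUTEX() assumed from linux/rtmutex.h:
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, the owner gets PI-boosted by waiters ...
 *	rt_mutex_unlock(&my_lock);
 */
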
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: the rt_mutex to be locked
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible;
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock: the rt_mutex to be locked
 * @timeout: timeout structure or NULL (no timeout)
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -ETIMEDOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

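/*
 * Caller-side sketch (illustrative): the timeout is an absolute-time
 * hrtimer_sleeper that the caller prepares, roughly:
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	to.timer.expires = expiry_time;
 *	hrtimer_init_sleeper(&to, current);
 *	ret = rt_mutex_timed_lock(lock, &to, 0);
 *
 * hrtimer_init() and hrtimer_init_sleeper() are assumed from
 * linux/hrtimer.h; the PI-futex code is the in-tree user.
 */
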
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be unlocked
 * @proxy_owner: the task that currently owns the lock
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}