[PATCH] rtmutex: Propagate priority settings into PI lock chains

When the priority of a task that is blocked on a lock changes, we must
propagate this change into the PI lock chain.  Therefore the chain walk
code is changed to get rid of the references to current, which avoids
false positives in the deadlock detector: sched_setscheduler() might be
called by a task which holds the lock on which the task whose priority
is being changed is blocked.
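For illustration only, here is a minimal userspace sketch of the idea;
it is not part of the patch, and the names (struct task, struct lock,
pi_walk) are invented and do not mirror the kernel structures.  The
walk follows the blocked-on links, boosts lock owners along the way,
and detects deadlock against the task whose priority changed
(top_task) rather than against the caller:

  #include <stddef.h>
  #include <stdio.h>

  struct task;

  struct lock {
      struct task *owner;
  };

  struct task {
      const char *comm;
      int prio;                   /* lower value means higher priority */
      struct lock *blocked_on;    /* lock this task waits for, or NULL */
  };

  /*
   * Propagate 'prio' along the chain starting at 'task'.  Deadlock is
   * checked against 'top_task', the task whose priority changed, not
   * against whoever happens to run this code.
   */
  static int pi_walk(struct task *task, struct task *top_task, int prio)
  {
      while (task->blocked_on) {
          struct task *owner = task->blocked_on->owner;

          if (owner == top_task)
              return -1;          /* deadlock: chain loops back */
          if (owner->prio <= prio)
              break;              /* owner already high enough */
          owner->prio = prio;     /* propagate the boost */
          task = owner;
      }
      return 0;
  }

  int main(void)
  {
      struct task c = { "C", 30, NULL };
      struct lock l2 = { &c };
      struct task b = { "B", 20, &l2 };
      struct lock l1 = { &b };
      struct task a = { "A", 10, &l1 };

      /* A (prio 10) is blocked behind B and C; boost them both. */
      if (pi_walk(&a, &a, a.prio) == 0)
          printf("B=%d C=%d\n", b.prio, c.prio);  /* prints B=10 C=10 */
      return 0;
  }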

Also add some comments about the get/put_task_struct usage to avoid
confusion.
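The pattern those comments document, sketched with invented userspace
helpers (task_get/task_put/walk_chain stand in for get_task_struct,
put_task_struct and rt_mutex_adjust_prio_chain; this is not kernel
code): a reference is taken under the task's pi_lock before that lock
is dropped, and the chain walk drops the reference when it is done.

  #include <stdatomic.h>
  #include <stdlib.h>

  struct task {
      atomic_int usage;           /* reference count */
  };

  static void task_get(struct task *t)
  {
      atomic_fetch_add(&t->usage, 1);
  }

  static void task_put(struct task *t)
  {
      if (atomic_fetch_sub(&t->usage, 1) == 1)
          free(t);                /* last reference dropped */
  }

  static void walk_chain(struct task *t)
  {
      /* ...walk and boost with no locks held... */
      task_put(t);                /* drop the reference taken by the caller */
  }

  static void adjust_pi(struct task *t)
  {
      /* spin_lock(&t->pi_lock); */
      task_get(t);                /* keep t alive across the unlocked walk */
      /* spin_unlock(&t->pi_lock); */
      walk_chain(t);              /* "gets dropped in rt_mutex_adjust_prio_chain()!" */
  }

  int main(void)
  {
      struct task *t = malloc(sizeof(*t));

      if (!t)
          return 1;
      atomic_init(&t->usage, 1);  /* initial reference */
      adjust_pi(t);               /* the get/put pair balances out */
      task_put(t);                /* final reference frees t */
      return 0;
  }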

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4e6be7..821f048 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1044,11 +1044,13 @@
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(task_t *p);
 extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
 #else
 static inline int rt_mutex_getprio(task_t *p)
 {
 	return p->normal_prio;
 }
+# define rt_mutex_adjust_pi(p)		do { } while (0)
 #endif
 
 extern void set_user_nice(task_t *p, long nice);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 3fc0f06..45d6101 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -160,7 +160,8 @@
 static int rt_mutex_adjust_prio_chain(task_t *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
-				      struct rt_mutex_waiter *orig_waiter
+				      struct rt_mutex_waiter *orig_waiter,
+				      struct task_struct *top_task
 				      __IP_DECL__)
 {
 	struct rt_mutex *lock;
@@ -189,7 +190,7 @@
 			prev_max = max_lock_depth;
 			printk(KERN_WARNING "Maximum lock depth %d reached "
 			       "task: %s (%d)\n", max_lock_depth,
-			       current->comm, current->pid);
+			       top_task->comm, top_task->pid);
 		}
 		put_task_struct(task);
 
@@ -229,7 +230,7 @@
 	}
 
 	/* Deadlock detection */
-	if (lock == orig_lock || rt_mutex_owner(lock) == current) {
+	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		spin_unlock(&lock->wait_lock);
 		ret = deadlock_detect ? -EDEADLK : 0;
@@ -433,6 +434,7 @@
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on) {
 			boost = 1;
+			/* gets dropped in rt_mutex_adjust_prio_chain()! */
 			get_task_struct(owner);
 		}
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -441,6 +443,7 @@
 		spin_lock_irqsave(&owner->pi_lock, flags);
 		if (owner->pi_blocked_on) {
 			boost = 1;
+			/* gets dropped in rt_mutex_adjust_prio_chain()! */
 			get_task_struct(owner);
 		}
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -450,8 +453,8 @@
 
 	spin_unlock(&lock->wait_lock);
 
-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
-					 waiter __IP__);
+	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+					 current __IP__);
 
 	spin_lock(&lock->wait_lock);
 
@@ -552,6 +555,7 @@
 
 		if (owner->pi_blocked_on) {
 			boost = 1;
+			/* gets dropped in rt_mutex_adjust_prio_chain()! */
 			get_task_struct(owner);
 		}
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -564,12 +568,37 @@
 
 	spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
 
 	spin_lock(&lock->wait_lock);
 }
 
 /*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+	struct rt_mutex_waiter *waiter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&task->pi_lock, flags);
+
+	waiter = task->pi_blocked_on;
+	if (!waiter || waiter->list_entry.prio == task->prio) {
+		spin_unlock_irqrestore(&task->pi_lock, flags);
+		return;
+	}
+
+	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+	get_task_struct(task);
+	spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
+/*
  * Slow path lock function:
  */
 static int __sched
@@ -636,6 +665,7 @@
 			if (unlikely(ret))
 				break;
 		}
+
 		spin_unlock(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(&waiter);
diff --git a/kernel/sched.c b/kernel/sched.c
index 7a30add..2629c17 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4070,6 +4070,8 @@
 	__task_rq_unlock(rq);
 	spin_unlock_irqrestore(&p->pi_lock, flags);
 
+	rt_mutex_adjust_pi(p);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);