[PATCH] taskstats: kill ->taskstats_lock in favor of ->siglock

signal_struct is (mostly) protected by ->sighand->siglock, so we don't need the
separate ->stats_lock spinlock to protect ->stats.  Switching to ->siglock also
allows us to simplify the locking in fill_tgid().
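
For reference, the reworked taskstats_tgid_alloc() relies on the usual
allocate-outside-the-lock, publish-under-the-lock pattern: the buffer is
allocated with nothing held (the allocation may sleep), installed under
->siglock only if ->stats is still NULL, and the loser of the race simply
frees its copy; a failed allocation falls through the same path harmlessly.
A standalone userspace sketch of that pattern follows (illustration only,
not part of the patch): struct group, group_stats_alloc() and a pthread
mutex are stand-ins for signal_struct, taskstats_tgid_alloc() and
->sighand->siglock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct stats { long counters[16]; };

struct group {
	pthread_mutex_t lock;		/* stands in for ->sighand->siglock */
	struct stats *stats;		/* stands in for signal->stats */
};

static void group_stats_alloc(struct group *grp)
{
	struct stats *stats;

	/* Lockless fast path, mirrors the unlocked check in the patch. */
	if (grp->stats != NULL)
		return;

	/* Allocate without the lock held; a NULL result is harmless. */
	stats = calloc(1, sizeof(*stats));

	pthread_mutex_lock(&grp->lock);
	if (grp->stats == NULL) {
		/* We won the race (or publish NULL, which is a no-op). */
		grp->stats = stats;
		stats = NULL;
	}
	pthread_mutex_unlock(&grp->lock);

	/* Lost the race: drop our redundant copy. free(NULL) is fine. */
	free(stats);
}

int main(void)
{
	struct group grp = { .lock = PTHREAD_MUTEX_INITIALIZER, .stats = NULL };

	group_stats_alloc(&grp);	/* first caller installs the buffer */
	group_stats_alloc(&grp);	/* second caller is a no-op */

	printf("stats installed: %s\n", grp.stats ? "yes" : "no");
	free(grp.stats);
	return 0;
}

Build with cc -pthread; the second call returns early because the pointer is
already published, which is exactly why the per-signal lock can go away once
->siglock serializes the install.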

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Shailabh Nagar <nagar@watson.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Jay Lan <jlan@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6735c1c..eafe4a7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -466,7 +466,6 @@
 	struct pacct_struct pacct;	/* per-process accounting information */
 #endif
 #ifdef CONFIG_TASKSTATS
-	spinlock_t stats_lock;
 	struct taskstats *stats;
 #endif
 };
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
index 6642240..6562a20 100644
--- a/include/linux/taskstats_kern.h
+++ b/include/linux/taskstats_kern.h
@@ -23,28 +23,26 @@
 
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {
-	spin_lock_init(&sig->stats_lock);
 	sig->stats = NULL;
 }
 
-static inline void taskstats_tgid_alloc(struct signal_struct *sig)
+static inline void taskstats_tgid_alloc(struct task_struct *tsk)
 {
+	struct signal_struct *sig = tsk->signal;
 	struct taskstats *stats;
-	unsigned long flags;
 
 	if (sig->stats != NULL)
 		return;
 
+	/* No problem if kmem_cache_zalloc() fails */
 	stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-	if (!stats)
-		return;
 
-	spin_lock_irqsave(&sig->stats_lock, flags);
+	spin_lock_irq(&tsk->sighand->siglock);
 	if (!sig->stats) {
 		sig->stats = stats;
 		stats = NULL;
 	}
-	spin_unlock_irqrestore(&sig->stats_lock, flags);
+	spin_unlock_irq(&tsk->sighand->siglock);
 
 	if (stats)
 		kmem_cache_free(taskstats_cache, stats);
@@ -59,7 +57,6 @@
 extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
 extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
 extern void taskstats_init_early(void);
-extern void taskstats_tgid_alloc(struct signal_struct *);
 #else
 static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
 {}
@@ -71,7 +68,7 @@
 {}
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {}
-static inline void taskstats_tgid_alloc(struct signal_struct *sig)
+static inline void taskstats_tgid_alloc(struct task_struct *tsk)
 {}
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {}
diff --git a/kernel/fork.c b/kernel/fork.c
index 2133266..3da978e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -830,7 +830,7 @@
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current->signal);
+		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9aeee51..b2efda9 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -241,11 +241,11 @@
 	tsk = first;
 	read_lock(&tasklist_lock);
 	/* Start with stats from dead tasks */
-	if (first->signal) {
-		spin_lock_irqsave(&first->signal->stats_lock, flags);
+	if (first->sighand) {
+		spin_lock_irqsave(&first->sighand->siglock, flags);
 		if (first->signal->stats)
 			memcpy(stats, first->signal->stats, sizeof(*stats));
-		spin_unlock_irqrestore(&first->signal->stats_lock, flags);
+		spin_unlock_irqrestore(&first->sighand->siglock, flags);
 	}
 
 	do {
@@ -276,7 +276,7 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
+	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 	if (!tsk->signal->stats)
 		goto ret;
 
@@ -288,7 +288,7 @@
 	 */
 	delayacct_add_tsk(tsk->signal->stats, tsk);
 ret:
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
+	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 	return;
 }
 
@@ -464,15 +464,10 @@
 	size_t size;
 	int is_thread_group;
 	struct nlattr *na;
-	unsigned long flags;
 
 	if (!family_registered || !tidstats)
 		return;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
-	is_thread_group = tsk->signal->stats ? 1 : 0;
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
-
 	rc = 0;
 	/*
 	 * Size includes space for nested attributes
@@ -480,6 +475,7 @@
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
+	is_thread_group = (tsk->signal->stats != NULL);
 	if (is_thread_group)
 		size = 2 * size;	/* PID + STATS + TGID + STATS */