/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

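/*
 * Userspace interaction, in sketch form: a monitor opens a
 * NETLINK_GENERIC socket, resolves the TASKSTATS_GENL_NAME family id
 * via CTRL_CMD_GETFAMILY, and then either
 *
 *  - sends TASKSTATS_CMD_GET with TASKSTATS_CMD_ATTR_PID (or _TGID)
 *    to fetch a one-shot reply for a task (or process), or
 *  - sends TASKSTATS_CMD_GET with TASKSTATS_CMD_ATTR_REGISTER_CPUMASK
 *    carrying a cpulist string such as "0-3", after which it receives
 *    a TASKSTATS_CMD_NEW message whenever a task exits on those cpus.
 *
 * See Documentation/accounting/getdelays.c for a complete example.
 */
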
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

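/*
 * Per-cpu lists of listeners: the nl_pids that have registered to
 * receive exit data for tasks finishing on that cpu. An entry is
 * marked invalid when a send to it is refused, and is reaped on the
 * next pass under the write side of the semaphore.
 */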
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

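/*
 * Allocate a new sk_buff and start a genetlink message in it. Given a
 * genl_info the message is set up as a reply to that request; without
 * one (the task-exit path) a fresh per-cpu sequence number is used.
 */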
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Finalize the taskstats data in @skb and send it back to the sender
 * of the request described by @info.
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to every listener registered for the
 * exiting task's cpu, i.e. each entry on @listeners.
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
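	/*
	 * genlmsg_unicast() consumes the skb it is handed, so clone a
	 * copy up front for every listener except the last one.
	 */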
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

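/*
 * Collect stats for the single task identified by @pid, pinning the
 * task with a reference so it cannot be freed while being sampled.
 */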
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(tsk, stats);
	put_task_struct(tsk);
	return 0;
}

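/*
 * Collect per-process stats for @tgid: start from the stats already
 * accumulated for dead threads in first->signal->stats, then add the
 * contribution of each live thread in the group.
 */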
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Each accounting subsystem can call its functions here
		 * to fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

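/*
 * Register or deregister @pid as an exit-data listener on every cpu in
 * @mask. A failed allocation during registration falls through to the
 * deregister path, which removes any entries added so far.
 */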
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

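/*
 * Parse a cpumask attribute: copy the cpulist string out of @na and
 * convert it into @mask. Returns 1 if the attribute is absent, 0 on
 * success and a negative errno on failure.
 */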
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

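/*
 * Reserve room in @skb for an aggregate reply: a pid or tgid attribute
 * followed by an empty taskstats attribute, nested under the matching
 * AGGR type. Returns a pointer to the reserved taskstats payload for
 * the caller to fill in, or NULL if the skb ran out of room.
 */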
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fput_light(file, fput_needed);
	return rc;
}

static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

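/*
 * Space needed for one pid/tgid + taskstats aggregate: the u32 id, the
 * stats payload and the nest header, plus alignment padding where the
 * architecture requires it.
 */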
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

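/*
 * Lazily allocate the per-process taskstats accumulator. The
 * allocation is done without holding the siglock; if another thread
 * installed sig->stats first, the spare copy is simply freed.
 */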
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

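	/*
	 * Exit data goes only to listeners registered for the cpu the
	 * task is exiting on; bail out before building a reply if there
	 * are none.
	 */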
	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	fill_stats(tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);