/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

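/*
 * Interface summary (see Documentation/accounting/taskstats.txt for the
 * authoritative description): userspace resolves the TASKSTATS_GENL_NAME
 * generic netlink family, then either sends TASKSTATS_CMD_GET with a
 * TASKSTATS_CMD_ATTR_PID/TGID attribute to fetch one task's statistics,
 * or registers a cpumask with TASKSTATS_CMD_ATTR_REGISTER_CPUMASK to
 * receive a taskstats record for every task that exits on those CPUs.
 */
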
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

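/*
 * Processes that register for a CPU's exit data are tracked in per-CPU
 * lists of listeners, each protected by an rw_semaphore. Entries that
 * refuse delivery are marked invalid and reaped in send_cpu_listeners().
 */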
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

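/*
 * Allocate a new sk_buff of @size and start a generic netlink message of
 * type @cmd in it. When @info is NULL the message is kernel-initiated
 * (exit data) and is numbered from the per-CPU sequence counter;
 * otherwise it is set up as a reply to the request described by @info.
 * On success, *@skbp points to the new buffer.
 */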
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb as a unicast reply to the sender of @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to all the listeners registered for a
 * CPU's exit data, passed in as @listeners
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

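/*
 * Fill @stats for a single task: delay accounting, basic accounting
 * fields (including context switch counts), and extended accounting.
 */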
static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

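/*
 * Look up the task for @pid under RCU, pin it with get_task_struct() and
 * fill @stats from it. Returns -ESRCH if no such pid exists.
 */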
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(tsk, stats);
	put_task_struct(tsk);
	return 0;
}

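/*
 * Fill @stats with the aggregate for thread group @tgid: start from the
 * stats already accumulated for dead threads in signal->stats (if any)
 * and add the live threads under the sighand lock, skipping zombies.
 */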
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

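/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @mask. Registration is idempotent per CPU: a pid already present in a
 * CPU's list is skipped. If an allocation fails part way, the entries
 * already added for @mask are backed out via the cleanup path.
 */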
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	s = NULL;
	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			if (!s)
				s = kmalloc_node(sizeof(struct listener),
						 GFP_KERNEL, cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
				if (s2->pid == pid)
					goto next_cpu;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
next_cpu:
			up_write(&listeners->sem);
		}
		kfree(s);
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

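/*
 * Parse the cpumask attribute @na into @mask. The payload is a cpulist
 * string as accepted by cpulist_parse(), e.g. "0-3,5". Returns 1 when
 * @na is absent, a negative errno on error, and 0 on success.
 */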
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

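/*
 * Reserve the nested reply layout in @skb and return a pointer to the
 * taskstats payload for the caller to fill in:
 *
 *	TASKSTATS_TYPE_NULL			(padding, only when needed)
 *	TASKSTATS_TYPE_AGGR_PID or _AGGR_TGID	(nest)
 *		@type (PID or TGID)		(u32)
 *		TASKSTATS_TYPE_STATS		(struct taskstats)
 */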
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

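/*
 * Handle CGROUPSTATS_CMD_GET: the request carries an open file descriptor
 * for a cgroup directory; build the cgroupstats for that cgroup and send
 * them back as a CGROUPSTATS_CMD_NEW reply.
 */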
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fput_light(file, fput_needed);
	return rc;
}

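/*
 * The next two handlers register and deregister the sending process
 * (identified by info->snd_pid, its netlink port id) as a listener for
 * exit data from the CPUs named in the cpumask attribute.
 */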
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

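/*
 * Dispatch TASKSTATS_CMD_GET according to which command attribute is
 * present; the first attribute found, in the order below, wins.
 */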
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

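/*
 * Lazily allocate the per-thread-group taskstats structure on first use.
 * A racing allocation is resolved under the sighand lock and the loser
 * frees its copy. As noted below, allocation failure is harmless: the
 * group record is simply not accumulated.
 */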
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/*
 * Send pid data out on exit - called from do_exit(). A PID record is
 * built for the exiting task; when the last member of a thread group
 * exits, an aggregated TGID record is appended to the same message,
 * which is then sent to every listener registered for this CPU.
 */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	fill_stats(tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

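/*
 * Register the taskstats generic netlink family and the taskstats and
 * cgroupstats operations, unwinding in reverse order on failure.
 */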
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);